/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00040,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

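/*
 * Usage note: the options above are standard module parameters, so they can
 * be set at load time (for example "modprobe ipr max_speed=1 log_level=2")
 * or, for a built-in driver, on the kernel command line as "ipr.max_speed=1".
 */
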
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

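/*
 * Each entry in ipr_error_table pairs an IOASC value reported by the adapter
 * with logging controls and the message text to print; error handling code
 * later in this file looks entries up by IOASC to decide how to log them.
 */
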
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

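/*
 * In the compare strings above, 'X' marks a product-id byte that must match
 * and any other character (such as '*') is skipped as a wildcard; see
 * ipr_find_ses_entry() later in this file for the comparison logic.
 */
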
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

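/*
 * Note that ipr_get_free_ipr_cmnd() does not check for an empty free list;
 * it assumes a free command block is available, and callers elsewhere in
 * the driver take one while holding the host lock.
 */
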
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch(proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	};
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}

			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
				sizeof(res->dev_lun.scsi_lun));
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_resource_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_resource_path(u8 *res_path, char *buffer)
{
	int i;

	sprintf(buffer, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff; i++)
		sprintf(buffer, "%s-%02X", buffer, res_path[i]);

	return buffer;
}

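/*
 * For example, a resource path of three bytes terminated by 0xff is printed
 * as a string such as "00-28-01": hex bytes separated by dashes, stopping at
 * the 0xff terminator.
 */
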
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

1208/**
1209 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1210 * for the resource.
1211 * @res: resource entry struct
1213 *
1214 * Return value:
1215 * none
1216 **/
1217static void ipr_clear_res_target(struct ipr_resource_entry *res)
1218{
1219 struct ipr_resource_entry *gscsi_res = NULL;
1220 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1221
1222 if (!ioa_cfg->sis64)
1223 return;
1224
1225 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1226 clear_bit(res->target, ioa_cfg->array_ids);
1227 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1228 clear_bit(res->target, ioa_cfg->vset_ids);
1229 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
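		/* Generic SCSI LUNs of the same device may share one target ID;
		 * only release the ID when no other resource with this dev_id remains. */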
1230 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1231 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1232 return;
1233 clear_bit(res->target, ioa_cfg->target_ids);
1234
1235 } else if (res->bus == 0)
1236 clear_bit(res->target, ioa_cfg->target_ids);
1237}
1238
1239/**
1240 * ipr_handle_config_change - Handle a config change from the adapter
1241 * @ioa_cfg: ioa config struct
1242 * @hostrcb: hostrcb
1243 *
1244 * Return value:
1245 * none
1246 **/
1247static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1248 struct ipr_hostrcb *hostrcb)
1249{
1250 struct ipr_resource_entry *res = NULL;
1251 struct ipr_config_table_entry_wrapper cfgtew;
1252 __be32 cc_res_handle;
1253
1254 u32 is_ndn = 1;
1255
1256 if (ioa_cfg->sis64) {
1257 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1258 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1259 } else {
1260 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1261 cc_res_handle = cfgtew.u.cfgte->res_handle;
1262 }
1263
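	/* Look for an existing resource entry with this handle; if none is
	 * found (is_ndn stays set), a free entry is claimed for the new device below. */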
1264 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1265 if (res->res_handle == cc_res_handle) {
1266 is_ndn = 0;
1267 break;
1268 }
1269 }
1270
1271 if (is_ndn) {
1272 if (list_empty(&ioa_cfg->free_res_q)) {
1273 ipr_send_hcam(ioa_cfg,
1274 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1275 hostrcb);
1276 return;
1277 }
1278
1279 res = list_entry(ioa_cfg->free_res_q.next,
1280 struct ipr_resource_entry, queue);
1281
1282 list_del(&res->queue);
1283 ipr_init_res_entry(res, &cfgtew);
1284 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1285 }
1286
1287 ipr_update_res_entry(res, &cfgtew);
1288
1289 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1290 if (res->sdev) {
1291 res->del_from_ml = 1;
1292 res->res_handle = IPR_INVALID_RES_HANDLE;
1293 if (ioa_cfg->allow_ml_add_del)
1294 schedule_work(&ioa_cfg->work_q);
1295 } else {
1296 ipr_clear_res_target(res);
1297 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1298 }
1299 } else if (!res->sdev) {
1300 res->add_to_ml = 1;
1301 if (ioa_cfg->allow_ml_add_del)
1302 schedule_work(&ioa_cfg->work_q);
1303 }
1304
1305 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1306}
1307
1308/**
1309 * ipr_process_ccn - Op done function for a CCN.
1310 * @ipr_cmd: ipr command struct
1311 *
1312 * This function is the op done function for a configuration
1313 * change notification host controlled async from the adapter.
1314 *
1315 * Return value:
1316 * none
1317 **/
1318static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1319{
1320 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1321 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1322 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1323
1324 list_del(&hostrcb->queue);
1325 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1326
1327 if (ioasc) {
1328 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1329 dev_err(&ioa_cfg->pdev->dev,
1330 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1331
1332 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1333 } else {
1334 ipr_handle_config_change(ioa_cfg, hostrcb);
1335 }
1336}
1337
1338/**
1339 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1340 * @i: index into buffer
1341 * @buf: string to modify
1342 *
1343 * This function will strip all trailing whitespace, pad the end
1344 * of the string with a single space, and NULL terminate the string.
1345 *
1346 * Return value:
1347 * new length of string
1348 **/
1349static int strip_and_pad_whitespace(int i, char *buf)
1350{
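	/* Back up over trailing spaces, then append a single space and NUL terminate. */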
1351 while (i && buf[i] == ' ')
1352 i--;
1353 buf[i+1] = ' ';
1354 buf[i+2] = '\0';
1355 return i + 2;
1356}
1357
1358/**
1359 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1360 * @prefix: string to print at start of printk
1361 * @hostrcb: hostrcb pointer
1362 * @vpd: vendor/product id/sn struct
1363 *
1364 * Return value:
1365 * none
1366 **/
1367static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1368 struct ipr_vpd *vpd)
1369{
1370 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1371 int i = 0;
1372
1373 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1374 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1375
1376 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1377 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1378
1379 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1380 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1381
1382 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1383}
1384
1385/**
1386 * ipr_log_vpd - Log the passed VPD to the error log.
1387 * @vpd: vendor/product id/sn struct
1388 *
1389 * Return value:
1390 * none
1391 **/
1392static void ipr_log_vpd(struct ipr_vpd *vpd)
1393{
1394 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1395 + IPR_SERIAL_NUM_LEN];
1396
1397 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1398 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1399 IPR_PROD_ID_LEN);
1400 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1401 ipr_err("Vendor/Product ID: %s\n", buffer);
1402
1403 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1404 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1405 ipr_err(" Serial Number: %s\n", buffer);
1406}
1407
1408/**
1409 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1410 * @prefix: string to print at start of printk
1411 * @hostrcb: hostrcb pointer
1412 * @vpd: vendor/product id/sn/wwn struct
1413 *
1414 * Return value:
1415 * none
1416 **/
1417static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1418 struct ipr_ext_vpd *vpd)
1419{
1420 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1421 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1422 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1423}
1424
1425/**
1426 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1427 * @vpd: vendor/product id/sn/wwn struct
1428 *
1429 * Return value:
1430 * none
1431 **/
1432static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1433{
1434 ipr_log_vpd(&vpd->vpd);
1435 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1436 be32_to_cpu(vpd->wwid[1]));
1437}
1438
1439/**
1440 * ipr_log_enhanced_cache_error - Log a cache error.
1441 * @ioa_cfg: ioa config struct
1442 * @hostrcb: hostrcb struct
1443 *
1444 * Return value:
1445 * none
1446 **/
1447static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1448 struct ipr_hostrcb *hostrcb)
1449{
1450 struct ipr_hostrcb_type_12_error *error;
1451
1452 if (ioa_cfg->sis64)
1453 error = &hostrcb->hcam.u.error64.u.type_12_error;
1454 else
1455 error = &hostrcb->hcam.u.error.u.type_12_error;
1456
1457 ipr_err("-----Current Configuration-----\n");
1458 ipr_err("Cache Directory Card Information:\n");
1459 ipr_log_ext_vpd(&error->ioa_vpd);
1460 ipr_err("Adapter Card Information:\n");
1461 ipr_log_ext_vpd(&error->cfc_vpd);
1462
1463 ipr_err("-----Expected Configuration-----\n");
1464 ipr_err("Cache Directory Card Information:\n");
1465 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1466 ipr_err("Adapter Card Information:\n");
1467 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1468
1469 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1470 be32_to_cpu(error->ioa_data[0]),
1471 be32_to_cpu(error->ioa_data[1]),
1472 be32_to_cpu(error->ioa_data[2]));
1473}
1474
1475/**
1476 * ipr_log_cache_error - Log a cache error.
1477 * @ioa_cfg: ioa config struct
1478 * @hostrcb: hostrcb struct
1479 *
1480 * Return value:
1481 * none
1482 **/
1483static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1484 struct ipr_hostrcb *hostrcb)
1485{
1486 struct ipr_hostrcb_type_02_error *error =
1487 &hostrcb->hcam.u.error.u.type_02_error;
1488
1489 ipr_err("-----Current Configuration-----\n");
1490 ipr_err("Cache Directory Card Information:\n");
1491 ipr_log_vpd(&error->ioa_vpd);
1492 ipr_err("Adapter Card Information:\n");
1493 ipr_log_vpd(&error->cfc_vpd);
1494
1495 ipr_err("-----Expected Configuration-----\n");
1496 ipr_err("Cache Directory Card Information:\n");
1497 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1498 ipr_err("Adapter Card Information:\n");
1499 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1500
1501 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1502 be32_to_cpu(error->ioa_data[0]),
1503 be32_to_cpu(error->ioa_data[1]),
1504 be32_to_cpu(error->ioa_data[2]));
1505}
1506
1507/**
1508 * ipr_log_enhanced_config_error - Log a configuration error.
1509 * @ioa_cfg: ioa config struct
1510 * @hostrcb: hostrcb struct
1511 *
1512 * Return value:
1513 * none
1514 **/
1515static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1516 struct ipr_hostrcb *hostrcb)
1517{
1518 int errors_logged, i;
1519 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1520 struct ipr_hostrcb_type_13_error *error;
1521
1522 error = &hostrcb->hcam.u.error.u.type_13_error;
1523 errors_logged = be32_to_cpu(error->errors_logged);
1524
1525 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1526 be32_to_cpu(error->errors_detected), errors_logged);
1527
1528 dev_entry = error->dev;
1529
1530 for (i = 0; i < errors_logged; i++, dev_entry++) {
1531 ipr_err_separator;
1532
1533 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1534 ipr_log_ext_vpd(&dev_entry->vpd);
1535
1536 ipr_err("-----New Device Information-----\n");
1537 ipr_log_ext_vpd(&dev_entry->new_vpd);
1538
1539 ipr_err("Cache Directory Card Information:\n");
1540 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1541
1542 ipr_err("Adapter Card Information:\n");
1543 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1544 }
1545}
1546
1547/**
1548 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1549 * @ioa_cfg: ioa config struct
1550 * @hostrcb: hostrcb struct
1551 *
1552 * Return value:
1553 * none
1554 **/
1555static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1556 struct ipr_hostrcb *hostrcb)
1557{
1558 int errors_logged, i;
1559 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1560 struct ipr_hostrcb_type_23_error *error;
1561 char buffer[IPR_MAX_RES_PATH_LENGTH];
1562
1563 error = &hostrcb->hcam.u.error64.u.type_23_error;
1564 errors_logged = be32_to_cpu(error->errors_logged);
1565
1566 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1567 be32_to_cpu(error->errors_detected), errors_logged);
1568
1569 dev_entry = error->dev;
1570
1571 for (i = 0; i < errors_logged; i++, dev_entry++) {
1572 ipr_err_separator;
1573
1574 ipr_err("Device %d : %s", i + 1,
1575 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1576 ipr_log_ext_vpd(&dev_entry->vpd);
1577
1578 ipr_err("-----New Device Information-----\n");
1579 ipr_log_ext_vpd(&dev_entry->new_vpd);
1580
1581 ipr_err("Cache Directory Card Information:\n");
1582 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1583
1584 ipr_err("Adapter Card Information:\n");
1585 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1586 }
1587}
1588
1589/**
1590 * ipr_log_config_error - Log a configuration error.
1591 * @ioa_cfg: ioa config struct
1592 * @hostrcb: hostrcb struct
1593 *
1594 * Return value:
1595 * none
1596 **/
1597static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1598 struct ipr_hostrcb *hostrcb)
1599{
1600 int errors_logged, i;
1601 struct ipr_hostrcb_device_data_entry *dev_entry;
1602 struct ipr_hostrcb_type_03_error *error;
1603
1604 error = &hostrcb->hcam.u.error.u.type_03_error;
1605 errors_logged = be32_to_cpu(error->errors_logged);
1606
1607 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1608 be32_to_cpu(error->errors_detected), errors_logged);
1609
1610 dev_entry = error->dev;
1611
1612 for (i = 0; i < errors_logged; i++, dev_entry++) {
1613 ipr_err_separator;
1614
1615 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1616 ipr_log_vpd(&dev_entry->vpd);
1617
1618 ipr_err("-----New Device Information-----\n");
1619 ipr_log_vpd(&dev_entry->new_vpd);
1620
1621 ipr_err("Cache Directory Card Information:\n");
1622 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1623
1624 ipr_err("Adapter Card Information:\n");
1625 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1626
1627 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1628 be32_to_cpu(dev_entry->ioa_data[0]),
1629 be32_to_cpu(dev_entry->ioa_data[1]),
1630 be32_to_cpu(dev_entry->ioa_data[2]),
1631 be32_to_cpu(dev_entry->ioa_data[3]),
1632 be32_to_cpu(dev_entry->ioa_data[4]));
1633 }
1634}
1635
1636/**
1637 * ipr_log_enhanced_array_error - Log an array configuration error.
1638 * @ioa_cfg: ioa config struct
1639 * @hostrcb: hostrcb struct
1640 *
1641 * Return value:
1642 * none
1643 **/
1644static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1645 struct ipr_hostrcb *hostrcb)
1646{
1647 int i, num_entries;
1648 struct ipr_hostrcb_type_14_error *error;
1649 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1650 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1651
1652 error = &hostrcb->hcam.u.error.u.type_14_error;
1653
1654 ipr_err_separator;
1655
1656 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1657 error->protection_level,
1658 ioa_cfg->host->host_no,
1659 error->last_func_vset_res_addr.bus,
1660 error->last_func_vset_res_addr.target,
1661 error->last_func_vset_res_addr.lun);
1662
1663 ipr_err_separator;
1664
1665 array_entry = error->array_member;
1666 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1667 sizeof(error->array_member));
1668
1669 for (i = 0; i < num_entries; i++, array_entry++) {
1670 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1671 continue;
1672
1673 if (be32_to_cpu(error->exposed_mode_adn) == i)
1674 ipr_err("Exposed Array Member %d:\n", i);
1675 else
1676 ipr_err("Array Member %d:\n", i);
1677
1678 ipr_log_ext_vpd(&array_entry->vpd);
1679 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1680 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1681 "Expected Location");
1682
1683 ipr_err_separator;
1684 }
1685}
1686
1687/**
1688 * ipr_log_array_error - Log an array configuration error.
1689 * @ioa_cfg: ioa config struct
1690 * @hostrcb: hostrcb struct
1691 *
1692 * Return value:
1693 * none
1694 **/
1695static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1696 struct ipr_hostrcb *hostrcb)
1697{
1698 int i;
1699 struct ipr_hostrcb_type_04_error *error;
1700 struct ipr_hostrcb_array_data_entry *array_entry;
1701 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1702
1703 error = &hostrcb->hcam.u.error.u.type_04_error;
1704
1705 ipr_err_separator;
1706
1707 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1708 error->protection_level,
1709 ioa_cfg->host->host_no,
1710 error->last_func_vset_res_addr.bus,
1711 error->last_func_vset_res_addr.target,
1712 error->last_func_vset_res_addr.lun);
1713
1714 ipr_err_separator;
1715
1716 array_entry = error->array_member;
1717
1718 for (i = 0; i < 18; i++) {
1719 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1720 continue;
1721
1722 if (be32_to_cpu(error->exposed_mode_adn) == i)
1723 ipr_err("Exposed Array Member %d:\n", i);
1724 else
1725 ipr_err("Array Member %d:\n", i);
1726
1727 ipr_log_vpd(&array_entry->vpd);
1728
1729 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1730 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1731 "Expected Location");
1732
1733 ipr_err_separator;
1734
1735 if (i == 9)
1736 array_entry = error->array_member2;
1737 else
1738 array_entry++;
1739 }
1740}
1741
1742/**
1743 * ipr_log_hex_data - Log additional hex IOA error data.
1744 * @ioa_cfg: ioa config struct
1745 * @data: IOA error data
1746 * @len: data length
1747 *
1748 * Return value:
1749 * none
1750 **/
1751static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1752{
1753 int i;
1754
1755 if (len == 0)
1756 return;
1757
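	/* At the default log level, cap the hex dump so one error cannot flood the log. */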
1758 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1759 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1760
1761 for (i = 0; i < len / 4; i += 4) {
1762 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1763 be32_to_cpu(data[i]),
1764 be32_to_cpu(data[i+1]),
1765 be32_to_cpu(data[i+2]),
1766 be32_to_cpu(data[i+3]));
1767 }
1768}
1769
1770/**
1771 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1772 * @ioa_cfg: ioa config struct
1773 * @hostrcb: hostrcb struct
1774 *
1775 * Return value:
1776 * none
1777 **/
1778static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1779 struct ipr_hostrcb *hostrcb)
1780{
1781 struct ipr_hostrcb_type_17_error *error;
1782
1783 if (ioa_cfg->sis64)
1784 error = &hostrcb->hcam.u.error64.u.type_17_error;
1785 else
1786 error = &hostrcb->hcam.u.error.u.type_17_error;
1787
1788 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1789 strim(error->failure_reason);
1790
1791 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1792 be32_to_cpu(hostrcb->hcam.u.error.prc));
1793 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1794 ipr_log_hex_data(ioa_cfg, error->data,
1795 be32_to_cpu(hostrcb->hcam.length) -
1796 (offsetof(struct ipr_hostrcb_error, u) +
1797 offsetof(struct ipr_hostrcb_type_17_error, data)));
1798}
1799
1800/**
1801 * ipr_log_dual_ioa_error - Log a dual adapter error.
1802 * @ioa_cfg: ioa config struct
1803 * @hostrcb: hostrcb struct
1804 *
1805 * Return value:
1806 * none
1807 **/
1808static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1809 struct ipr_hostrcb *hostrcb)
1810{
1811 struct ipr_hostrcb_type_07_error *error;
1812
1813 error = &hostrcb->hcam.u.error.u.type_07_error;
1814 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1815 strim(error->failure_reason);
1816
1817 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1818 be32_to_cpu(hostrcb->hcam.u.error.prc));
1819 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1820 ipr_log_hex_data(ioa_cfg, error->data,
1821 be32_to_cpu(hostrcb->hcam.length) -
1822 (offsetof(struct ipr_hostrcb_error, u) +
1823 offsetof(struct ipr_hostrcb_type_07_error, data)));
1824}
1825
1826static const struct {
1827 u8 active;
1828 char *desc;
1829} path_active_desc[] = {
1830 { IPR_PATH_NO_INFO, "Path" },
1831 { IPR_PATH_ACTIVE, "Active path" },
1832 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1833};
1834
1835static const struct {
1836 u8 state;
1837 char *desc;
1838} path_state_desc[] = {
1839 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1840 { IPR_PATH_HEALTHY, "is healthy" },
1841 { IPR_PATH_DEGRADED, "is degraded" },
1842 { IPR_PATH_FAILED, "is failed" }
1843};
1844
1845/**
1846 * ipr_log_fabric_path - Log a fabric path error
1847 * @hostrcb: hostrcb struct
1848 * @fabric: fabric descriptor
1849 *
1850 * Return value:
1851 * none
1852 **/
1853static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1854 struct ipr_hostrcb_fabric_desc *fabric)
1855{
1856 int i, j;
1857 u8 path_state = fabric->path_state;
1858 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1859 u8 state = path_state & IPR_PATH_STATE_MASK;
1860
1861 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1862 if (path_active_desc[i].active != active)
1863 continue;
1864
1865 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1866 if (path_state_desc[j].state != state)
1867 continue;
1868
1869 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1870 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1871 path_active_desc[i].desc, path_state_desc[j].desc,
1872 fabric->ioa_port);
1873 } else if (fabric->cascaded_expander == 0xff) {
1874 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1875 path_active_desc[i].desc, path_state_desc[j].desc,
1876 fabric->ioa_port, fabric->phy);
1877 } else if (fabric->phy == 0xff) {
1878 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1879 path_active_desc[i].desc, path_state_desc[j].desc,
1880 fabric->ioa_port, fabric->cascaded_expander);
1881 } else {
1882 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1883 path_active_desc[i].desc, path_state_desc[j].desc,
1884 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1885 }
1886 return;
1887 }
1888 }
1889
1890 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1891 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1892}
1893
1894/**
1895 * ipr_log64_fabric_path - Log a fabric path error
1896 * @hostrcb: hostrcb struct
1897 * @fabric: fabric descriptor
1898 *
1899 * Return value:
1900 * none
1901 **/
1902static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1903 struct ipr_hostrcb64_fabric_desc *fabric)
1904{
1905 int i, j;
1906 u8 path_state = fabric->path_state;
1907 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1908 u8 state = path_state & IPR_PATH_STATE_MASK;
1909 char buffer[IPR_MAX_RES_PATH_LENGTH];
1910
1911 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1912 if (path_active_desc[i].active != active)
1913 continue;
1914
1915 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1916 if (path_state_desc[j].state != state)
1917 continue;
1918
1919 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1920 path_active_desc[i].desc, path_state_desc[j].desc,
1921 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1922 return;
1923 }
1924 }
1925
1926 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1927 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1928}
1929
1930static const struct {
1931 u8 type;
1932 char *desc;
1933} path_type_desc[] = {
1934 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1935 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1936 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1937 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1938};
1939
1940static const struct {
1941 u8 status;
1942 char *desc;
1943} path_status_desc[] = {
1944 { IPR_PATH_CFG_NO_PROB, "Functional" },
1945 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1946 { IPR_PATH_CFG_FAILED, "Failed" },
1947 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1948 { IPR_PATH_NOT_DETECTED, "Missing" },
1949 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1950};
1951
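/* Printable SAS link rates, indexed by the link rate field
 * (cfg->link_rate & IPR_PHY_LINK_RATE_MASK); 0x8 = 1.5Gbps, 0x9 = 3.0Gbps. */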
1952static const char *link_rate[] = {
1953 "unknown",
1954 "disabled",
1955 "phy reset problem",
1956 "spinup hold",
1957 "port selector",
1958 "unknown",
1959 "unknown",
1960 "unknown",
1961 "1.5Gbps",
1962 "3.0Gbps",
1963 "unknown",
1964 "unknown",
1965 "unknown",
1966 "unknown",
1967 "unknown",
1968 "unknown"
1969};
1970
1971/**
1972 * ipr_log_path_elem - Log a fabric path element.
1973 * @hostrcb: hostrcb struct
1974 * @cfg: fabric path element struct
1975 *
1976 * Return value:
1977 * none
1978 **/
1979static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1980 struct ipr_hostrcb_config_element *cfg)
1981{
1982 int i, j;
1983 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1984 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1985
1986 if (type == IPR_PATH_CFG_NOT_EXIST)
1987 return;
1988
1989 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1990 if (path_type_desc[i].type != type)
1991 continue;
1992
1993 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1994 if (path_status_desc[j].status != status)
1995 continue;
1996
1997 if (type == IPR_PATH_CFG_IOA_PORT) {
1998 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1999 path_status_desc[j].desc, path_type_desc[i].desc,
2000 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2001 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2002 } else {
2003 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2004 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2005 path_status_desc[j].desc, path_type_desc[i].desc,
2006 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2007 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2008 } else if (cfg->cascaded_expander == 0xff) {
2009 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2010 "WWN=%08X%08X\n", path_status_desc[j].desc,
2011 path_type_desc[i].desc, cfg->phy,
2012 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2013 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2014 } else if (cfg->phy == 0xff) {
2015 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2016 "WWN=%08X%08X\n", path_status_desc[j].desc,
2017 path_type_desc[i].desc, cfg->cascaded_expander,
2018 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2019 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2020 } else {
2021 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2022 "WWN=%08X%08X\n", path_status_desc[j].desc,
2023 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2024 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2025 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2026 }
2027 }
2028 return;
2029 }
2030 }
2031
2032 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2033 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2034 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2035 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2036}
2037
2038/**
2039 * ipr_log64_path_elem - Log a fabric path element.
2040 * @hostrcb: hostrcb struct
2041 * @cfg: fabric path element struct
2042 *
2043 * Return value:
2044 * none
2045 **/
2046static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2047 struct ipr_hostrcb64_config_element *cfg)
2048{
2049 int i, j;
2050 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2051 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2052 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2053 char buffer[IPR_MAX_RES_PATH_LENGTH];
2054
2055 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2056 return;
2057
2058 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2059 if (path_type_desc[i].type != type)
2060 continue;
2061
2062 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2063 if (path_status_desc[j].status != status)
2064 continue;
2065
2066 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2067 path_status_desc[j].desc, path_type_desc[i].desc,
2068 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2069 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2070 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2071 return;
2072 }
2073 }
2074 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2075 "WWN=%08X%08X\n", cfg->type_status,
2076 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2077 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2078 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2079}
2080
2081/**
2082 * ipr_log_fabric_error - Log a fabric error.
2083 * @ioa_cfg: ioa config struct
2084 * @hostrcb: hostrcb struct
2085 *
2086 * Return value:
2087 * none
2088 **/
2089static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2090 struct ipr_hostrcb *hostrcb)
2091{
2092 struct ipr_hostrcb_type_20_error *error;
2093 struct ipr_hostrcb_fabric_desc *fabric;
2094 struct ipr_hostrcb_config_element *cfg;
2095 int i, add_len;
2096
2097 error = &hostrcb->hcam.u.error.u.type_20_error;
2098 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2099 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2100
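	/* add_len is the HCAM data following the fixed error header; each fabric
	 * descriptor's length is subtracted below and whatever remains is hex dumped. */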
2101 add_len = be32_to_cpu(hostrcb->hcam.length) -
2102 (offsetof(struct ipr_hostrcb_error, u) +
2103 offsetof(struct ipr_hostrcb_type_20_error, desc));
2104
2105 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2106 ipr_log_fabric_path(hostrcb, fabric);
2107 for_each_fabric_cfg(fabric, cfg)
2108 ipr_log_path_elem(hostrcb, cfg);
2109
2110 add_len -= be16_to_cpu(fabric->length);
2111 fabric = (struct ipr_hostrcb_fabric_desc *)
2112 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2113 }
2114
2115 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2116}
2117
2118/**
2119 * ipr_log_sis64_array_error - Log a sis64 array error.
2120 * @ioa_cfg: ioa config struct
2121 * @hostrcb: hostrcb struct
2122 *
2123 * Return value:
2124 * none
2125 **/
2126static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2127 struct ipr_hostrcb *hostrcb)
2128{
2129 int i, num_entries;
2130 struct ipr_hostrcb_type_24_error *error;
2131 struct ipr_hostrcb64_array_data_entry *array_entry;
2132 char buffer[IPR_MAX_RES_PATH_LENGTH];
2133 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2134
2135 error = &hostrcb->hcam.u.error64.u.type_24_error;
2136
2137 ipr_err_separator;
2138
2139 ipr_err("RAID %s Array Configuration: %s\n",
2140 error->protection_level,
2141 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2142
2143 ipr_err_separator;
2144
2145 array_entry = error->array_member;
2146 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2147 sizeof(error->array_member));
2148
2149 for (i = 0; i < num_entries; i++, array_entry++) {
2150
2151 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2152 continue;
2153
2154 if (error->exposed_mode_adn == i)
2155 ipr_err("Exposed Array Member %d:\n", i);
2156 else
2157 ipr_err("Array Member %d:\n", i);
2158
2160 ipr_log_ext_vpd(&array_entry->vpd);
2161 ipr_err("Current Location: %s",
2162 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2163 ipr_err("Expected Location: %s",
2164 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2165
2166 ipr_err_separator;
2167 }
2168}
2169
2170/**
2171 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2172 * @ioa_cfg: ioa config struct
2173 * @hostrcb: hostrcb struct
2174 *
2175 * Return value:
2176 * none
2177 **/
2178static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2179 struct ipr_hostrcb *hostrcb)
2180{
2181 struct ipr_hostrcb_type_30_error *error;
2182 struct ipr_hostrcb64_fabric_desc *fabric;
2183 struct ipr_hostrcb64_config_element *cfg;
2184 int i, add_len;
2185
2186 error = &hostrcb->hcam.u.error64.u.type_30_error;
2187
2188 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2189 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2190
2191 add_len = be32_to_cpu(hostrcb->hcam.length) -
2192 (offsetof(struct ipr_hostrcb64_error, u) +
2193 offsetof(struct ipr_hostrcb_type_30_error, desc));
2194
2195 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2196 ipr_log64_fabric_path(hostrcb, fabric);
2197 for_each_fabric_cfg(fabric, cfg)
2198 ipr_log64_path_elem(hostrcb, cfg);
2199
2200 add_len -= be16_to_cpu(fabric->length);
2201 fabric = (struct ipr_hostrcb64_fabric_desc *)
2202 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2203 }
2204
2205 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2206}
2207
2208/**
2209 * ipr_log_generic_error - Log an adapter error.
2210 * @ioa_cfg: ioa config struct
2211 * @hostrcb: hostrcb struct
2212 *
2213 * Return value:
2214 * none
2215 **/
2216static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2217 struct ipr_hostrcb *hostrcb)
2218{
2219 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2220 be32_to_cpu(hostrcb->hcam.length));
2221}
2222
2223/**
2224 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2225 * @ioasc: IOASC
2226 *
2227 * This function will return the index into the ipr_error_table
2228 * for the specified IOASC. If the IOASC is not in the table,
2229 * 0 will be returned, which points to the entry used for unknown errors.
2230 *
2231 * Return value:
2232 * index into the ipr_error_table
2233 **/
2234static u32 ipr_get_error(u32 ioasc)
2235{
2236 int i;
2237
2238 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2239 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2240 return i;
2241
2242 return 0;
2243}
2244
2245/**
2246 * ipr_handle_log_data - Log an adapter error.
2247 * @ioa_cfg: ioa config struct
2248 * @hostrcb: hostrcb struct
2249 *
2250 * This function logs an adapter error to the system.
2251 *
2252 * Return value:
2253 * none
2254 **/
2255static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2256 struct ipr_hostrcb *hostrcb)
2257{
2258 u32 ioasc;
2259 int error_index;
2260
2261 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2262 return;
2263
2264 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2265 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2266
2267 if (ioa_cfg->sis64)
2268 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2269 else
2270 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2271
2272 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2273 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2274 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2275 scsi_report_bus_reset(ioa_cfg->host,
2276 hostrcb->hcam.u.error.fd_res_addr.bus);
2277 }
2278
2279 error_index = ipr_get_error(ioasc);
2280
2281 if (!ipr_error_table[error_index].log_hcam)
2282 return;
2283
2284 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2285
2286 /* Set indication we have logged an error */
2287 ioa_cfg->errors_logged++;
2288
2289 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2290 return;
2291 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2292 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
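	/* Decode the error by overlay ID; unrecognized overlays fall back to
	 * a raw hex dump via ipr_log_generic_error(). */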
2293
2294 switch (hostrcb->hcam.overlay_id) {
2295 case IPR_HOST_RCB_OVERLAY_ID_2:
2296 ipr_log_cache_error(ioa_cfg, hostrcb);
2297 break;
2298 case IPR_HOST_RCB_OVERLAY_ID_3:
2299 ipr_log_config_error(ioa_cfg, hostrcb);
2300 break;
2301 case IPR_HOST_RCB_OVERLAY_ID_4:
2302 case IPR_HOST_RCB_OVERLAY_ID_6:
2303 ipr_log_array_error(ioa_cfg, hostrcb);
2304 break;
2305 case IPR_HOST_RCB_OVERLAY_ID_7:
2306 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2307 break;
2308 case IPR_HOST_RCB_OVERLAY_ID_12:
2309 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2310 break;
2311 case IPR_HOST_RCB_OVERLAY_ID_13:
2312 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2313 break;
2314 case IPR_HOST_RCB_OVERLAY_ID_14:
2315 case IPR_HOST_RCB_OVERLAY_ID_16:
2316 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2317 break;
2318 case IPR_HOST_RCB_OVERLAY_ID_17:
2319 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2320 break;
2321 case IPR_HOST_RCB_OVERLAY_ID_20:
2322 ipr_log_fabric_error(ioa_cfg, hostrcb);
2323 break;
2324 case IPR_HOST_RCB_OVERLAY_ID_23:
2325 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2326 break;
2327 case IPR_HOST_RCB_OVERLAY_ID_24:
2328 case IPR_HOST_RCB_OVERLAY_ID_26:
2329 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2330 break;
2331 case IPR_HOST_RCB_OVERLAY_ID_30:
2332 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2333 break;
2334 case IPR_HOST_RCB_OVERLAY_ID_1:
2335 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2336 default:
2337 ipr_log_generic_error(ioa_cfg, hostrcb);
2338 break;
2339 }
2340}
2341
2342/**
2343 * ipr_process_error - Op done function for an adapter error log.
2344 * @ipr_cmd: ipr command struct
2345 *
2346 * This function is the op done function for an error log host
2347 * controlled async from the adapter. It will log the error and
2348 * send the HCAM back to the adapter.
2349 *
2350 * Return value:
2351 * none
2352 **/
2353static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2354{
2355 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2356 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2357 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2358 u32 fd_ioasc;
2359
2360 if (ioa_cfg->sis64)
2361 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2362 else
2363 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2364
2365 list_del(&hostrcb->queue);
2366 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2367
2368 if (!ioasc) {
2369 ipr_handle_log_data(ioa_cfg, hostrcb);
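		/* If the failing device IOASC indicates an IOA reset is required,
		 * start an abbreviated-shutdown adapter reset after logging. */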
2370 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2371 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2372 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2373 dev_err(&ioa_cfg->pdev->dev,
2374 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2375 }
2376
2377 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2378}
2379
2380/**
2381 * ipr_timeout - An internally generated op has timed out.
2382 * @ipr_cmd: ipr command struct
2383 *
2384 * This function blocks host requests and initiates an
2385 * adapter reset.
2386 *
2387 * Return value:
2388 * none
2389 **/
2390static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2391{
2392 unsigned long lock_flags = 0;
2393 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2394
2395 ENTER;
2396 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2397
2398 ioa_cfg->errors_logged++;
2399 dev_err(&ioa_cfg->pdev->dev,
2400 "Adapter being reset due to command timeout.\n");
2401
2402 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2403 ioa_cfg->sdt_state = GET_DUMP;
2404
2405 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2406 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2407
2408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2409 LEAVE;
2410}
2411
2412/**
2413 * ipr_oper_timeout - Adapter timed out transitioning to operational
2414 * @ipr_cmd: ipr command struct
2415 *
2416 * This function blocks host requests and initiates an
2417 * adapter reset.
2418 *
2419 * Return value:
2420 * none
2421 **/
2422static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2423{
2424 unsigned long lock_flags = 0;
2425 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2426
2427 ENTER;
2428 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2429
2430 ioa_cfg->errors_logged++;
2431 dev_err(&ioa_cfg->pdev->dev,
2432 "Adapter timed out transitioning to operational.\n");
2433
2434 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2435 ioa_cfg->sdt_state = GET_DUMP;
2436
2437 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2438 if (ipr_fastfail)
2439 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2440 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2441 }
2442
2443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2444 LEAVE;
2445}
2446
2447/**
2448 * ipr_reset_reload - Reset/Reload the IOA
2449 * @ioa_cfg: ioa config struct
2450 * @shutdown_type: shutdown type
2451 *
2452 * This function resets the adapter and re-initializes it.
2453 * This function assumes that all new host commands have been stopped.
2454 * Return value:
2455 * SUCCESS / FAILED
2456 **/
2457static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2458 enum ipr_shutdown_type shutdown_type)
2459{
2460 if (!ioa_cfg->in_reset_reload)
2461 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2462
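	/* The caller holds the host lock; drop it while sleeping so the reset
	 * can make progress, then re-take it before checking the result. */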
2463 spin_unlock_irq(ioa_cfg->host->host_lock);
2464 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2465 spin_lock_irq(ioa_cfg->host->host_lock);
2466
2467 /* If a host reset arrived while we were already resetting the
2468    adapter for some other reason and that reset failed, report the failure. */
2469 if (ioa_cfg->ioa_is_dead) {
2470 ipr_trace;
2471 return FAILED;
2472 }
2473
2474 return SUCCESS;
2475}
2476
2477/**
2478 * ipr_find_ses_entry - Find matching SES in SES table
2479 * @res: resource entry struct of SES
2480 *
2481 * Return value:
2482 * pointer to SES table entry / NULL on failure
2483 **/
2484static const struct ipr_ses_table_entry *
2485ipr_find_ses_entry(struct ipr_resource_entry *res)
2486{
2487 int i, j, matches;
2488 struct ipr_std_inq_vpids *vpids;
2489 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2490
2491 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2492 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2493 if (ste->compare_product_id_byte[j] == 'X') {
2494 vpids = &res->std_inq_data.vpids;
2495 if (vpids->product_id[j] == ste->product_id[j])
2496 matches++;
2497 else
2498 break;
2499 } else
2500 matches++;
2501 }
2502
2503 if (matches == IPR_PROD_ID_LEN)
2504 return ste;
2505 }
2506
2507 return NULL;
2508}
2509
2510/**
2511 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2512 * @ioa_cfg: ioa config struct
2513 * @bus: SCSI bus
2514 * @bus_width: bus width
2515 *
2516 * Return value:
2517 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2518 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2519 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2520 * max 160MHz = max 320MB/sec).
2521 **/
2522static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2523{
2524 struct ipr_resource_entry *res;
2525 const struct ipr_ses_table_entry *ste;
2526 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2527
2528 /* Loop through each config table entry in the config table buffer */
2529 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2530 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2531 continue;
2532
2533 if (bus != res->bus)
2534 continue;
2535
2536 if (!(ste = ipr_find_ses_entry(res)))
2537 continue;
2538
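		/* ste->max_bus_speed_limit appears to be in MB/s; scale it to the
		 * 100 kHz units this function returns, accounting for bus width. */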
2539 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2540 }
2541
2542 return max_xfer_rate;
2543}
2544
2545/**
2546 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2547 * @ioa_cfg: ioa config struct
2548 * @max_delay: max delay in micro-seconds to wait
2549 *
2550 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2551 *
2552 * Return value:
2553 * 0 on success / other on failure
2554 **/
2555static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2556{
2557 volatile u32 pcii_reg;
2558 int delay = 1;
2559
2560 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2561 while (delay < max_delay) {
2562 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2563
2564 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2565 return 0;
2566
2567 /* udelay cannot be used if delay is more than a few milliseconds */
2568 if ((delay / 1000) > MAX_UDELAY_MS)
2569 mdelay(delay / 1000);
2570 else
2571 udelay(delay);
2572
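		/* Exponential backoff: double the wait each pass until max_delay is reached. */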
2573 delay += delay;
2574 }
2575 return -EIO;
2576}
2577
2578/**
2579 * ipr_get_sis64_dump_data_section - Dump IOA memory
2580 * @ioa_cfg: ioa config struct
2581 * @start_addr: adapter address to dump
2582 * @dest: destination kernel buffer
2583 * @length_in_words: length to dump in 4 byte words
2584 *
2585 * Return value:
2586 * 0 on success
2587 **/
2588static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2589 u32 start_addr,
2590 __be32 *dest, u32 length_in_words)
2591{
2592 int i;
2593
2594 for (i = 0; i < length_in_words; i++) {
2595 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2596 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2597 dest++;
2598 }
2599
2600 return 0;
2601}
2602
2603/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 * ipr_get_ldump_data_section - Dump IOA memory
2605 * @ioa_cfg: ioa config struct
2606 * @start_addr: adapter address to dump
2607 * @dest: destination kernel buffer
2608 * @length_in_words: length to dump in 4 byte words
2609 *
2610 * Return value:
2611 * 0 on success / -EIO on failure
2612 **/
2613static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2614 u32 start_addr,
2615 __be32 *dest, u32 length_in_words)
2616{
2617 volatile u32 temp_pcii_reg;
2618 int i, delay = 0;
2619
2620 if (ioa_cfg->sis64)
2621 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2622 dest, length_in_words);
2623
2624 /* Write IOA interrupt reg starting LDUMP state */
2625 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2626 ioa_cfg->regs.set_uproc_interrupt_reg32);
2627
2628 /* Wait for IO debug acknowledge */
2629 if (ipr_wait_iodbg_ack(ioa_cfg,
2630 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2631 dev_err(&ioa_cfg->pdev->dev,
2632 "IOA dump long data transfer timeout\n");
2633 return -EIO;
2634 }
2635
2636 /* Signal LDUMP interlocked - clear IO debug ack */
2637 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2638 ioa_cfg->regs.clr_interrupt_reg);
2639
2640 /* Write Mailbox with starting address */
2641 writel(start_addr, ioa_cfg->ioa_mailbox);
2642
2643 /* Signal address valid - clear IOA Reset alert */
2644 writel(IPR_UPROCI_RESET_ALERT,
2645 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2646
2647 for (i = 0; i < length_in_words; i++) {
2648 /* Wait for IO debug acknowledge */
2649 if (ipr_wait_iodbg_ack(ioa_cfg,
2650 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2651 dev_err(&ioa_cfg->pdev->dev,
2652 "IOA dump short data transfer timeout\n");
2653 return -EIO;
2654 }
2655
2656 /* Read data from mailbox and increment destination pointer */
2657 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2658 dest++;
2659
2660 /* For all but the last word of data, signal data received */
2661 if (i < (length_in_words - 1)) {
2662 /* Signal dump data received - Clear IO debug Ack */
2663 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2664 ioa_cfg->regs.clr_interrupt_reg);
2665 }
2666 }
2667
2668 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2669 writel(IPR_UPROCI_RESET_ALERT,
2670 ioa_cfg->regs.set_uproc_interrupt_reg32);
2671
2672 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2673 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2674
2675 /* Signal dump data received - Clear IO debug Ack */
2676 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2677 ioa_cfg->regs.clr_interrupt_reg);
2678
2679 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2680 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2681 temp_pcii_reg =
2682 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2683
2684 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2685 return 0;
2686
2687 udelay(10);
2688 delay += 10;
2689 }
2690
2691 return 0;
2692}
2693
2694#ifdef CONFIG_SCSI_IPR_DUMP
2695/**
2696 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2697 * @ioa_cfg: ioa config struct
2698 * @pci_address: adapter address
2699 * @length: length of data to copy
2700 *
2701 * Copy data from PCI adapter to kernel buffer.
2702 * Note: length MUST be a 4 byte multiple
2703 * Return value:
2704 * 0 on success / other on failure
2705 **/
2706static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2707 unsigned long pci_address, u32 length)
2708{
2709 int bytes_copied = 0;
2710 int cur_len, rc, rem_len, rem_page_len;
2711 __be32 *page;
2712 unsigned long lock_flags = 0;
2713 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2714
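	/* Copy the dump one page at a time, allocating a new page whenever the
	 * current one fills, and stop once the overall IOA dump size cap is hit. */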
2715 while (bytes_copied < length &&
2716 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2717 if (ioa_dump->page_offset >= PAGE_SIZE ||
2718 ioa_dump->page_offset == 0) {
2719 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2720
2721 if (!page) {
2722 ipr_trace;
2723 return bytes_copied;
2724 }
2725
2726 ioa_dump->page_offset = 0;
2727 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2728 ioa_dump->next_page_index++;
2729 } else
2730 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2731
2732 rem_len = length - bytes_copied;
2733 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2734 cur_len = min(rem_len, rem_page_len);
2735
2736 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2737 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2738 rc = -EIO;
2739 } else {
2740 rc = ipr_get_ldump_data_section(ioa_cfg,
2741 pci_address + bytes_copied,
2742 &page[ioa_dump->page_offset / 4],
2743 (cur_len / sizeof(u32)));
2744 }
2745 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2746
2747 if (!rc) {
2748 ioa_dump->page_offset += cur_len;
2749 bytes_copied += cur_len;
2750 } else {
2751 ipr_trace;
2752 break;
2753 }
2754 schedule();
2755 }
2756
2757 return bytes_copied;
2758}
2759
2760/**
2761 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2762 * @hdr: dump entry header struct
2763 *
2764 * Return value:
2765 * nothing
2766 **/
2767static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2768{
2769 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2770 hdr->num_elems = 1;
2771 hdr->offset = sizeof(*hdr);
2772 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2773}
2774
2775/**
2776 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2777 * @ioa_cfg: ioa config struct
2778 * @driver_dump: driver dump struct
2779 *
2780 * Return value:
2781 * nothing
2782 **/
2783static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2784 struct ipr_driver_dump *driver_dump)
2785{
2786 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2787
2788 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2789 driver_dump->ioa_type_entry.hdr.len =
2790 sizeof(struct ipr_dump_ioa_type_entry) -
2791 sizeof(struct ipr_dump_entry_header);
2792 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2793 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2794 driver_dump->ioa_type_entry.type = ioa_cfg->type;
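	/* Firmware version is packed one byte per field: major release,
	 * card type, then the two minor release bytes. */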
2795 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2796 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2797 ucode_vpd->minor_release[1];
2798 driver_dump->hdr.num_entries++;
2799}
2800
2801/**
2802 * ipr_dump_version_data - Fill in the driver version in the dump.
2803 * @ioa_cfg: ioa config struct
2804 * @driver_dump: driver dump struct
2805 *
2806 * Return value:
2807 * nothing
2808 **/
2809static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2810 struct ipr_driver_dump *driver_dump)
2811{
2812 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2813 driver_dump->version_entry.hdr.len =
2814 sizeof(struct ipr_dump_version_entry) -
2815 sizeof(struct ipr_dump_entry_header);
2816 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2817 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2818 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2819 driver_dump->hdr.num_entries++;
2820}
2821
2822/**
2823 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2824 * @ioa_cfg: ioa config struct
2825 * @driver_dump: driver dump struct
2826 *
2827 * Return value:
2828 * nothing
2829 **/
2830static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2831 struct ipr_driver_dump *driver_dump)
2832{
2833 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2834 driver_dump->trace_entry.hdr.len =
2835 sizeof(struct ipr_dump_trace_entry) -
2836 sizeof(struct ipr_dump_entry_header);
2837 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2838 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2839 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2840 driver_dump->hdr.num_entries++;
2841}
2842
2843/**
2844 * ipr_dump_location_data - Fill in the IOA location in the dump.
2845 * @ioa_cfg: ioa config struct
2846 * @driver_dump: driver dump struct
2847 *
2848 * Return value:
2849 * nothing
2850 **/
2851static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2852 struct ipr_driver_dump *driver_dump)
2853{
2854 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2855 driver_dump->location_entry.hdr.len =
2856 sizeof(struct ipr_dump_location_entry) -
2857 sizeof(struct ipr_dump_entry_header);
2858 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2859 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01002860 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 driver_dump->hdr.num_entries++;
2862}
2863
2864/**
2865 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2866 * @ioa_cfg: ioa config struct
2867 * @dump: dump struct
2868 *
2869 * Return value:
2870 * nothing
2871 **/
2872static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2873{
2874 unsigned long start_addr, sdt_word;
2875 unsigned long lock_flags = 0;
2876 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2877 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2878 u32 num_entries, start_off, end_off;
2879 u32 bytes_to_copy, bytes_copied, rc;
2880 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08002881 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 int i;
2883
2884 ENTER;
2885
2886 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2887
2888 if (ioa_cfg->sdt_state != GET_DUMP) {
2889 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2890 return;
2891 }
2892
2893 start_addr = readl(ioa_cfg->ioa_mailbox);
2894
Wayne Boyerdcbad002010-02-19 13:24:14 -08002895 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 dev_err(&ioa_cfg->pdev->dev,
2897 "Invalid dump table format: %lx\n", start_addr);
2898 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2899 return;
2900 }
2901
2902 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2903
2904 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2905
2906 /* Initialize the overall dump header */
2907 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2908 driver_dump->hdr.num_entries = 1;
2909 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2910 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2911 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2912 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2913
2914 ipr_dump_version_data(ioa_cfg, driver_dump);
2915 ipr_dump_location_data(ioa_cfg, driver_dump);
2916 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2917 ipr_dump_trace_data(ioa_cfg, driver_dump);
2918
2919 /* Update dump_header */
2920 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2921
2922 /* IOA Dump entry */
2923 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 ioa_dump->hdr.len = 0;
2925 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2926 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2927
2928	/* First entries in sdt are actually a list of dump addresses and
 2929	 * lengths to gather the real dump data. sdt represents the pointer
 2930	 * to the ioa generated dump table. Dump data will be extracted based
 2931	 * on entries in this table. */
2932 sdt = &ioa_dump->sdt;
2933
2934 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2935 sizeof(struct ipr_sdt) / sizeof(__be32));
2936
2937 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08002938 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2939 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 dev_err(&ioa_cfg->pdev->dev,
2941 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2942 rc, be32_to_cpu(sdt->hdr.state));
2943 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2944 ioa_cfg->sdt_state = DUMP_OBTAINED;
2945 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2946 return;
2947 }
2948
2949 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2950
2951 if (num_entries > IPR_NUM_SDT_ENTRIES)
2952 num_entries = IPR_NUM_SDT_ENTRIES;
2953
2954 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2955
2956 for (i = 0; i < num_entries; i++) {
2957 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2958 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2959 break;
2960 }
2961
2962 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08002963 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
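			/* sis64 SDT entries carry the length directly in end_token;
			 * older format 2 entries carry start/end offsets instead. */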
2964 if (ioa_cfg->sis64)
2965 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2966 else {
2967 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2968 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969
Wayne Boyerdcbad002010-02-19 13:24:14 -08002970 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2971 bytes_to_copy = end_off - start_off;
2972 else
2973 valid = 0;
2974 }
2975 if (valid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2977 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2978 continue;
2979 }
2980
2981 /* Copy data from adapter to driver buffers */
2982 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2983 bytes_to_copy);
2984
2985 ioa_dump->hdr.len += bytes_copied;
2986
2987 if (bytes_copied != bytes_to_copy) {
2988 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2989 break;
2990 }
2991 }
2992 }
2993 }
2994
2995 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2996
2997 /* Update dump_header */
2998 driver_dump->hdr.len += ioa_dump->hdr.len;
2999 wmb();
3000 ioa_cfg->sdt_state = DUMP_OBTAINED;
3001 LEAVE;
3002}
3003
3004#else
3005#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3006#endif
3007
3008/**
3009 * ipr_release_dump - Free adapter dump memory
3010 * @kref: kref struct
3011 *
3012 * Return value:
3013 * nothing
3014 **/
3015static void ipr_release_dump(struct kref *kref)
3016{
3017 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3018 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3019 unsigned long lock_flags = 0;
3020 int i;
3021
3022 ENTER;
3023 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3024 ioa_cfg->dump = NULL;
3025 ioa_cfg->sdt_state = INACTIVE;
3026 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3027
3028 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3029 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3030
3031 kfree(dump);
3032 LEAVE;
3033}
3034
3035/**
3036 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003037 * @work: work struct (embedded in the ioa config struct)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038 *
3039 * Called at task level from a work thread. This function takes care
3040 * of adding and removing device from the mid-layer as configuration
3041 * changes are detected by the adapter.
3042 *
3043 * Return value:
3044 * nothing
3045 **/
David Howellsc4028952006-11-22 14:57:56 +00003046static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047{
3048 unsigned long lock_flags;
3049 struct ipr_resource_entry *res;
3050 struct scsi_device *sdev;
3051 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003052 struct ipr_ioa_cfg *ioa_cfg =
3053 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 u8 bus, target, lun;
3055 int did_work;
3056
3057 ENTER;
3058 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3059
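	/* A pending dump request takes priority: capture the dump outside the
	 * lock, then reset the adapter once it has been obtained. */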
3060 if (ioa_cfg->sdt_state == GET_DUMP) {
3061 dump = ioa_cfg->dump;
3062 if (!dump) {
3063 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3064 return;
3065 }
3066 kref_get(&dump->kref);
3067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3068 ipr_get_ioa_dump(ioa_cfg, dump);
3069 kref_put(&dump->kref, ipr_release_dump);
3070
3071 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3072 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3073 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3074 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3075 return;
3076 }
3077
3078restart:
3079 do {
3080 did_work = 0;
3081 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3082 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3083 return;
3084 }
3085
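		/* Remove one departed device at a time; the host lock must be
		 * dropped around scsi_remove_device(), so rescan after each removal. */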
3086 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3087 if (res->del_from_ml && res->sdev) {
3088 did_work = 1;
3089 sdev = res->sdev;
3090 if (!scsi_device_get(sdev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3092 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3093 scsi_remove_device(sdev);
3094 scsi_device_put(sdev);
3095 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3096 }
3097 break;
3098 }
3099 }
3100	} while (did_work);
3101
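	/* Expose newly reported devices; drop the lock around scsi_add_device()
	 * and restart the scan since the resource list may have changed. */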
3102 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3103 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003104 bus = res->bus;
3105 target = res->target;
3106 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003107 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3109 scsi_add_device(ioa_cfg->host, bus, target, lun);
3110 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3111 goto restart;
3112 }
3113 }
3114
3115 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003116 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117 LEAVE;
3118}
3119
3120#ifdef CONFIG_SCSI_IPR_TRACE
3121/**
3122 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003123 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003125 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126 * @buf: buffer
3127 * @off: offset
3128 * @count: buffer size
3129 *
3130 * Return value:
3131 * number of bytes printed to buffer
3132 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003133static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003134 struct bin_attribute *bin_attr,
3135 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136{
Tony Jonesee959b02008-02-22 00:13:36 +01003137 struct device *dev = container_of(kobj, struct device, kobj);
3138 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3140 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003141 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142
3143 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003144 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3145 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003147
3148 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149}
3150
3151static struct bin_attribute ipr_trace_attr = {
3152 .attr = {
3153 .name = "trace",
3154 .mode = S_IRUGO,
3155 },
3156 .size = 0,
3157 .read = ipr_read_trace,
3158};
3159#endif
3160
3161/**
3162 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003163 * @dev: class device struct
3164 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 *
3166 * Return value:
3167 * number of bytes printed to buffer
3168 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003169static ssize_t ipr_show_fw_version(struct device *dev,
3170 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171{
Tony Jonesee959b02008-02-22 00:13:36 +01003172 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3174 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3175 unsigned long lock_flags = 0;
3176 int len;
3177
3178 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3179 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3180 ucode_vpd->major_release, ucode_vpd->card_type,
3181 ucode_vpd->minor_release[0],
3182 ucode_vpd->minor_release[1]);
3183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3184 return len;
3185}
3186
Tony Jonesee959b02008-02-22 00:13:36 +01003187static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188 .attr = {
3189 .name = "fw_version",
3190 .mode = S_IRUGO,
3191 },
3192 .show = ipr_show_fw_version,
3193};
3194
3195/**
3196 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003197 * @dev: class device struct
3198 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199 *
3200 * Return value:
3201 * number of bytes printed to buffer
3202 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003203static ssize_t ipr_show_log_level(struct device *dev,
3204 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205{
Tony Jonesee959b02008-02-22 00:13:36 +01003206 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3208 unsigned long lock_flags = 0;
3209 int len;
3210
3211 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3212 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3213 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3214 return len;
3215}
3216
3217/**
3218 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003219 * @dev: class device struct
3220 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 *
3222 * Return value:
3223 * 	number of bytes consumed from the buffer
3224 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003225static ssize_t ipr_store_log_level(struct device *dev,
3226 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227 const char *buf, size_t count)
3228{
Tony Jonesee959b02008-02-22 00:13:36 +01003229 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3231 unsigned long lock_flags = 0;
3232
3233 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3234 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3236 return strlen(buf);
3237}
3238
Tony Jonesee959b02008-02-22 00:13:36 +01003239static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240 .attr = {
3241 .name = "log_level",
3242 .mode = S_IRUGO | S_IWUSR,
3243 },
3244 .show = ipr_show_log_level,
3245 .store = ipr_store_log_level
3246};
3247
3248/**
3249 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003250 * @dev: device struct
3251 * @buf: buffer
3252 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 *
3254 * This function will reset the adapter and wait a reasonable
3255 * amount of time for any errors that the adapter might log.
3256 *
3257 * Return value:
3258 * count on success / other on failure
3259 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003260static ssize_t ipr_store_diagnostics(struct device *dev,
3261 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262 const char *buf, size_t count)
3263{
Tony Jonesee959b02008-02-22 00:13:36 +01003264 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3266 unsigned long lock_flags = 0;
3267 int rc = count;
3268
3269 if (!capable(CAP_SYS_ADMIN))
3270 return -EACCES;
3271
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King970ea292007-04-26 16:00:06 -05003273	while (ioa_cfg->in_reset_reload) {
3274 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3275 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3276 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3277 }
3278
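	/* Clear the error count, reset the adapter, and fail if any errors
	 * get logged while the reset completes. */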
Linus Torvalds1da177e2005-04-16 15:20:36 -07003279 ioa_cfg->errors_logged = 0;
3280 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3281
3282 if (ioa_cfg->in_reset_reload) {
3283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3284 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3285
3286 /* Wait for a second for any errors to be logged */
3287 msleep(1000);
3288 } else {
3289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3290 return -EIO;
3291 }
3292
3293 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3294 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3295 rc = -EIO;
3296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3297
3298 return rc;
3299}
3300
Tony Jonesee959b02008-02-22 00:13:36 +01003301static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 .attr = {
3303 .name = "run_diagnostics",
3304 .mode = S_IWUSR,
3305 },
3306 .store = ipr_store_diagnostics
3307};
3308
3309/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003310 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003311 * @dev: device struct
3312 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003313 *
3314 * Return value:
3315 * number of bytes printed to buffer
3316 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003317static ssize_t ipr_show_adapter_state(struct device *dev,
3318 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003319{
Tony Jonesee959b02008-02-22 00:13:36 +01003320 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003321 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3322 unsigned long lock_flags = 0;
3323 int len;
3324
3325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3326 if (ioa_cfg->ioa_is_dead)
3327 len = snprintf(buf, PAGE_SIZE, "offline\n");
3328 else
3329 len = snprintf(buf, PAGE_SIZE, "online\n");
3330 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3331 return len;
3332}
3333
3334/**
3335 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003336 * @dev: device struct
3337 * @buf: buffer
3338 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003339 *
3340 * This function will change the adapter's state.
3341 *
3342 * Return value:
3343 * count on success / other on failure
3344 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003345static ssize_t ipr_store_adapter_state(struct device *dev,
3346 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003347 const char *buf, size_t count)
3348{
Tony Jonesee959b02008-02-22 00:13:36 +01003349 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003350 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3351 unsigned long lock_flags;
3352 int result = count;
3353
3354 if (!capable(CAP_SYS_ADMIN))
3355 return -EACCES;
3356
3357 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3358 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3359 ioa_cfg->ioa_is_dead = 0;
3360 ioa_cfg->reset_retries = 0;
3361 ioa_cfg->in_ioa_bringdown = 0;
3362 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3363 }
3364 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3365 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3366
3367 return result;
3368}
3369
Tony Jonesee959b02008-02-22 00:13:36 +01003370static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003371 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003372 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003373 .mode = S_IRUGO | S_IWUSR,
3374 },
3375 .show = ipr_show_adapter_state,
3376 .store = ipr_store_adapter_state
3377};
3378
3379/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003381 * @dev: device struct
3382 * @buf: buffer
3383 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 *
3385 * This function will reset the adapter.
3386 *
3387 * Return value:
3388 * count on success / other on failure
3389 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003390static ssize_t ipr_store_reset_adapter(struct device *dev,
3391 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392 const char *buf, size_t count)
3393{
Tony Jonesee959b02008-02-22 00:13:36 +01003394 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3396 unsigned long lock_flags;
3397 int result = count;
3398
3399 if (!capable(CAP_SYS_ADMIN))
3400 return -EACCES;
3401
3402 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3403 if (!ioa_cfg->in_reset_reload)
3404 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3406 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3407
3408 return result;
3409}
3410
Tony Jonesee959b02008-02-22 00:13:36 +01003411static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412 .attr = {
3413 .name = "reset_host",
3414 .mode = S_IWUSR,
3415 },
3416 .store = ipr_store_reset_adapter
3417};
3418
3419/**
3420 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3421 * @buf_len: buffer length
3422 *
3423 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3424 * list to use for microcode download
3425 *
3426 * Return value:
3427 * pointer to sglist / NULL on failure
3428 **/
3429static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3430{
3431 int sg_size, order, bsize_elem, num_elem, i, j;
3432 struct ipr_sglist *sglist;
3433 struct scatterlist *scatterlist;
3434 struct page *page;
3435
3436 /* Get the minimum size per scatter/gather element */
3437 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3438
3439 /* Get the actual size per element */
3440 order = get_order(sg_size);
3441
3442 /* Determine the actual number of bytes per element */
3443 bsize_elem = PAGE_SIZE * (1 << order);
3444
3445 /* Determine the actual number of sg entries needed */
3446 if (buf_len % bsize_elem)
3447 num_elem = (buf_len / bsize_elem) + 1;
3448 else
3449 num_elem = buf_len / bsize_elem;
3450
3451 /* Allocate a scatter/gather list for the DMA */
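	/* struct ipr_sglist ends in one embedded scatterlist entry, so only
	 * num_elem - 1 additional entries need to be allocated here. */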
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003452 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 (sizeof(struct scatterlist) * (num_elem - 1)),
3454 GFP_KERNEL);
3455
3456 if (sglist == NULL) {
3457 ipr_trace;
3458 return NULL;
3459 }
3460
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461 scatterlist = sglist->scatterlist;
Jens Axboe45711f12007-10-22 21:19:53 +02003462 sg_init_table(scatterlist, num_elem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463
3464 sglist->order = order;
3465 sglist->num_sg = num_elem;
3466
3467 /* Allocate a bunch of sg elements */
3468 for (i = 0; i < num_elem; i++) {
3469 page = alloc_pages(GFP_KERNEL, order);
3470 if (!page) {
3471 ipr_trace;
3472
3473 /* Free up what we already allocated */
3474 for (j = i - 1; j >= 0; j--)
Jens Axboe45711f12007-10-22 21:19:53 +02003475 __free_pages(sg_page(&scatterlist[j]), order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 kfree(sglist);
3477 return NULL;
3478 }
3479
Jens Axboe642f1492007-10-24 11:20:47 +02003480 sg_set_page(&scatterlist[i], page, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481 }
3482
3483 return sglist;
3484}
3485
3486/**
3487 * ipr_free_ucode_buffer - Frees a microcode download buffer
3488 * @sglist: scatter/gather list pointer
3489 *
3490 * Free a DMA'able ucode download buffer previously allocated with
3491 * ipr_alloc_ucode_buffer
3492 *
3493 * Return value:
3494 * nothing
3495 **/
3496static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3497{
3498 int i;
3499
3500 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003501 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502
3503 kfree(sglist);
3504}
3505
3506/**
3507 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3508 * @sglist: scatter/gather list pointer
3509 * @buffer: buffer pointer
3510 * @len: buffer length
3511 *
3512 * Copy a microcode image from a user buffer into a buffer allocated by
3513 * ipr_alloc_ucode_buffer
3514 *
3515 * Return value:
3516 * 0 on success / other on failure
3517 **/
3518static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3519 u8 *buffer, u32 len)
3520{
3521 int bsize_elem, i, result = 0;
3522 struct scatterlist *scatterlist;
3523 void *kaddr;
3524
3525 /* Determine the actual number of bytes per element */
3526 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3527
3528 scatterlist = sglist->scatterlist;
3529
3530 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003531 struct page *page = sg_page(&scatterlist[i]);
3532
3533 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003535 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536
3537 scatterlist[i].length = bsize_elem;
3538
3539 if (result != 0) {
3540 ipr_trace;
3541 return result;
3542 }
3543 }
3544
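	/* Copy whatever is left over into a final, partially filled element */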
3545 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003546 struct page *page = sg_page(&scatterlist[i]);
3547
3548 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003550 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551
3552 scatterlist[i].length = len % bsize_elem;
3553 }
3554
3555 sglist->buffer_len = len;
3556 return result;
3557}
3558
3559/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003560 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3561 * @ipr_cmd: ipr command struct
3562 * @sglist: scatter/gather list
3563 *
3564 * Builds a microcode download IOA data list (IOADL).
3565 *
3566 **/
3567static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3568 struct ipr_sglist *sglist)
3569{
3570 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3571 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3572 struct scatterlist *scatterlist = sglist->scatterlist;
3573 int i;
3574
3575 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3576 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3577 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3578
3579 ioarcb->ioadl_len =
3580 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3581 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3582 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3583 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3584 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3585 }
3586
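	/* Flag the final descriptor as the end of the list */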
3587 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3588}
3589
3590/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003591 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592 * @ipr_cmd: ipr command struct
3593 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003595 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003598static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3599 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003602 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 struct scatterlist *scatterlist = sglist->scatterlist;
3604 int i;
3605
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003606 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003608 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3609
3610 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3612
3613 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3614 ioadl[i].flags_and_data_len =
3615 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3616 ioadl[i].address =
3617 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3618 }
3619
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003620 ioadl[i-1].flags_and_data_len |=
3621 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3622}
3623
3624/**
3625 * ipr_update_ioa_ucode - Update IOA's microcode
3626 * @ioa_cfg: ioa config struct
3627 * @sglist: scatter/gather list
3628 *
3629 * Initiate an adapter reset to update the IOA's microcode
3630 *
3631 * Return value:
3632 * 0 on success / -EIO on failure
3633 **/
3634static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3635 struct ipr_sglist *sglist)
3636{
3637 unsigned long lock_flags;
3638
3639 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King970ea292007-04-26 16:00:06 -05003640	while (ioa_cfg->in_reset_reload) {
3641 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3642 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3643 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3644 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003645
3646 if (ioa_cfg->ucode_sglist) {
3647 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3648 dev_err(&ioa_cfg->pdev->dev,
3649 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650 return -EIO;
3651 }
3652
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003653 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3654 sglist->num_sg, DMA_TO_DEVICE);
3655
3656 if (!sglist->num_dma_sg) {
3657 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3658 dev_err(&ioa_cfg->pdev->dev,
3659 "Failed to map microcode download buffer!\n");
3660 return -EIO;
3661 }
3662
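	/* The adapter reset job consumes ucode_sglist to perform the actual
	 * download; wait for the reset/reload to finish before clearing it. */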
3663 ioa_cfg->ucode_sglist = sglist;
3664 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3665 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3666 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3667
3668 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3669 ioa_cfg->ucode_sglist = NULL;
3670 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671 return 0;
3672}
3673
3674/**
3675 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003676 * @dev: device struct
3677 * @buf: buffer
3678 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679 *
3680 * This function will update the firmware on the adapter.
3681 *
3682 * Return value:
3683 * count on success / other on failure
3684 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003685static ssize_t ipr_store_update_fw(struct device *dev,
3686 struct device_attribute *attr,
3687 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688{
Tony Jonesee959b02008-02-22 00:13:36 +01003689 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3691 struct ipr_ucode_image_header *image_hdr;
3692 const struct firmware *fw_entry;
3693 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694 char fname[100];
3695 char *src;
3696 int len, result, dnld_size;
3697
3698 if (!capable(CAP_SYS_ADMIN))
3699 return -EACCES;
3700
3701 len = snprintf(fname, 99, "%s", buf);
3702 fname[len-1] = '\0';
3703
3704	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3705 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3706 return -EIO;
3707 }
3708
3709 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3710
3711 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3712 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3713 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3714 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3715 release_firmware(fw_entry);
3716 return -EINVAL;
3717 }
3718
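	/* The downloadable microcode immediately follows the image header */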
3719 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3720 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3721 sglist = ipr_alloc_ucode_buffer(dnld_size);
3722
3723 if (!sglist) {
3724 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3725 release_firmware(fw_entry);
3726 return -ENOMEM;
3727 }
3728
3729 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3730
3731 if (result) {
3732 dev_err(&ioa_cfg->pdev->dev,
3733 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003734 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 }
3736
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003737 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003739 if (!result)
3740 result = count;
3741out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742 ipr_free_ucode_buffer(sglist);
3743 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003744 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745}
3746
Tony Jonesee959b02008-02-22 00:13:36 +01003747static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 .attr = {
3749 .name = "update_fw",
3750 .mode = S_IWUSR,
3751 },
3752 .store = ipr_store_update_fw
3753};
3754
Tony Jonesee959b02008-02-22 00:13:36 +01003755static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756 &ipr_fw_version_attr,
3757 &ipr_log_level_attr,
3758 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003759 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760 &ipr_ioa_reset_attr,
3761 &ipr_update_fw_attr,
3762 NULL,
3763};
3764
3765#ifdef CONFIG_SCSI_IPR_DUMP
3766/**
3767 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07003768 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003770 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771 * @buf: buffer
3772 * @off: offset
3773 * @count: buffer size
3774 *
3775 * Return value:
3776 * number of bytes printed to buffer
3777 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003778static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003779 struct bin_attribute *bin_attr,
3780 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781{
Tony Jonesee959b02008-02-22 00:13:36 +01003782 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783 struct Scsi_Host *shost = class_to_shost(cdev);
3784 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3785 struct ipr_dump *dump;
3786 unsigned long lock_flags = 0;
3787 char *src;
3788 int len;
3789 size_t rc = count;
3790
3791 if (!capable(CAP_SYS_ADMIN))
3792 return -EACCES;
3793
3794 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3795 dump = ioa_cfg->dump;
3796
3797 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3798 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3799 return 0;
3800 }
3801 kref_get(&dump->kref);
3802 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3803
3804 if (off > dump->driver_dump.hdr.len) {
3805 kref_put(&dump->kref, ipr_release_dump);
3806 return 0;
3807 }
3808
3809 if (off + count > dump->driver_dump.hdr.len) {
3810 count = dump->driver_dump.hdr.len - off;
3811 rc = count;
3812 }
3813
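	/* The dump is presented as three consecutive regions: the driver dump,
	 * the IOA dump header, then the captured IOA data pages. */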
3814 if (count && off < sizeof(dump->driver_dump)) {
3815 if (off + count > sizeof(dump->driver_dump))
3816 len = sizeof(dump->driver_dump) - off;
3817 else
3818 len = count;
3819 src = (u8 *)&dump->driver_dump + off;
3820 memcpy(buf, src, len);
3821 buf += len;
3822 off += len;
3823 count -= len;
3824 }
3825
3826 off -= sizeof(dump->driver_dump);
3827
3828 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3829 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3830 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3831 else
3832 len = count;
3833 src = (u8 *)&dump->ioa_dump + off;
3834 memcpy(buf, src, len);
3835 buf += len;
3836 off += len;
3837 count -= len;
3838 }
3839
3840 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3841
3842 while (count) {
3843 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3844 len = PAGE_ALIGN(off) - off;
3845 else
3846 len = count;
3847 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3848 src += off & ~PAGE_MASK;
3849 memcpy(buf, src, len);
3850 buf += len;
3851 off += len;
3852 count -= len;
3853 }
3854
3855 kref_put(&dump->kref, ipr_release_dump);
3856 return rc;
3857}
3858
3859/**
3860 * ipr_alloc_dump - Prepare for adapter dump
3861 * @ioa_cfg: ioa config struct
3862 *
3863 * Return value:
3864 * 0 on success / other on failure
3865 **/
3866static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3867{
3868 struct ipr_dump *dump;
3869 unsigned long lock_flags = 0;
3870
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003871 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872
3873 if (!dump) {
3874 ipr_err("Dump memory allocation failed\n");
3875 return -ENOMEM;
3876 }
3877
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878 kref_init(&dump->kref);
3879 dump->ioa_cfg = ioa_cfg;
3880
3881 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3882
3883 if (INACTIVE != ioa_cfg->sdt_state) {
3884 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3885 kfree(dump);
3886 return 0;
3887 }
3888
3889 ioa_cfg->dump = dump;
3890 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3891 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3892 ioa_cfg->dump_taken = 1;
3893 schedule_work(&ioa_cfg->work_q);
3894 }
3895 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3896
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897 return 0;
3898}
3899
3900/**
3901 * ipr_free_dump - Free adapter dump memory
3902 * @ioa_cfg: ioa config struct
3903 *
3904 * Return value:
3905 * 0 on success / other on failure
3906 **/
3907static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3908{
3909 struct ipr_dump *dump;
3910 unsigned long lock_flags = 0;
3911
3912 ENTER;
3913
3914 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3915 dump = ioa_cfg->dump;
3916 if (!dump) {
3917 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3918 return 0;
3919 }
3920
3921 ioa_cfg->dump = NULL;
3922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3923
3924 kref_put(&dump->kref, ipr_release_dump);
3925
3926 LEAVE;
3927 return 0;
3928}
3929
3930/**
3931 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07003932 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003934 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 * @buf: buffer
3936 * @off: offset
3937 * @count: buffer size
3938 *
3939 * Return value:
3940 * 	count on success / other on failure
3941 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003942static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003943 struct bin_attribute *bin_attr,
3944 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945{
Tony Jonesee959b02008-02-22 00:13:36 +01003946 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 struct Scsi_Host *shost = class_to_shost(cdev);
3948 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3949 int rc;
3950
3951 if (!capable(CAP_SYS_ADMIN))
3952 return -EACCES;
3953
3954 if (buf[0] == '1')
3955 rc = ipr_alloc_dump(ioa_cfg);
3956 else if (buf[0] == '0')
3957 rc = ipr_free_dump(ioa_cfg);
3958 else
3959 return -EINVAL;
3960
3961 if (rc)
3962 return rc;
3963 else
3964 return count;
3965}
3966
3967static struct bin_attribute ipr_dump_attr = {
3968 .attr = {
3969 .name = "dump",
3970 .mode = S_IRUSR | S_IWUSR,
3971 },
3972 .size = 0,
3973 .read = ipr_read_dump,
3974 .write = ipr_write_dump
3975};
3976#else
3977static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3978#endif
3979
3980/**
3981 * ipr_change_queue_depth - Change the device's queue depth
3982 * @sdev: scsi device struct
3983 * @qdepth: depth to set
Mike Christiee881a172009-10-15 17:46:39 -07003984 * @reason: calling context
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 *
3986 * Return value:
3987 * 	actual depth set / negative errno on failure
3988 **/
Mike Christiee881a172009-10-15 17:46:39 -07003989static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3990 int reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991{
Brian King35a39692006-09-25 12:39:20 -05003992 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3993 struct ipr_resource_entry *res;
3994 unsigned long lock_flags = 0;
3995
Mike Christiee881a172009-10-15 17:46:39 -07003996 if (reason != SCSI_QDEPTH_DEFAULT)
3997 return -EOPNOTSUPP;
3998
Brian King35a39692006-09-25 12:39:20 -05003999 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4000 res = (struct ipr_resource_entry *)sdev->hostdata;
4001
4002 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4003 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4004 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4005
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4007 return sdev->queue_depth;
4008}
4009
4010/**
4011 * ipr_change_queue_type - Change the device's queue type
4012 * @sdev: scsi device struct
4013 * @tag_type: type of tags to use
4014 *
4015 * Return value:
4016 * actual queue type set
4017 **/
4018static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4019{
4020 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4021 struct ipr_resource_entry *res;
4022 unsigned long lock_flags = 0;
4023
4024 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4025 res = (struct ipr_resource_entry *)sdev->hostdata;
4026
4027 if (res) {
4028 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4029 /*
4030 * We don't bother quiescing the device here since the
4031 * adapter firmware does it for us.
4032 */
4033 scsi_set_tag_type(sdev, tag_type);
4034
4035 if (tag_type)
4036 scsi_activate_tcq(sdev, sdev->queue_depth);
4037 else
4038 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4039 } else
4040 tag_type = 0;
4041 } else
4042 tag_type = 0;
4043
4044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4045 return tag_type;
4046}
4047
4048/**
4049 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4050 * @dev: device struct
4051 * @buf: buffer
4052 *
4053 * Return value:
4054 * number of bytes printed to buffer
4055 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004056static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057{
4058 struct scsi_device *sdev = to_scsi_device(dev);
4059 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4060 struct ipr_resource_entry *res;
4061 unsigned long lock_flags = 0;
4062 ssize_t len = -ENXIO;
4063
4064 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4065 res = (struct ipr_resource_entry *)sdev->hostdata;
4066 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004067 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4069 return len;
4070}
4071
4072static struct device_attribute ipr_adapter_handle_attr = {
4073 .attr = {
4074 .name = "adapter_handle",
4075 .mode = S_IRUSR,
4076 },
4077 .show = ipr_show_adapter_handle
4078};
4079
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004080/**
4081 * ipr_show_resource_path - Show the resource path for this device.
4082 * @dev: device struct
4083 * @buf: buffer
4084 *
4085 * Return value:
4086 * number of bytes printed to buffer
4087 **/
4088static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4089{
4090 struct scsi_device *sdev = to_scsi_device(dev);
4091 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4092 struct ipr_resource_entry *res;
4093 unsigned long lock_flags = 0;
4094 ssize_t len = -ENXIO;
4095 char buffer[IPR_MAX_RES_PATH_LENGTH];
4096
4097 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4098 res = (struct ipr_resource_entry *)sdev->hostdata;
4099 if (res)
4100 len = snprintf(buf, PAGE_SIZE, "%s\n",
4101 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4103 return len;
4104}
4105
4106static struct device_attribute ipr_resource_path_attr = {
4107 .attr = {
4108 .name = "resource_path",
4109 .mode = S_IRUSR,
4110 },
4111 .show = ipr_show_resource_path
4112};
4113
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114static struct device_attribute *ipr_dev_attrs[] = {
4115 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004116 &ipr_resource_path_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117 NULL,
4118};
4119
4120/**
4121 * ipr_biosparam - Return the HSC mapping
4122 * @sdev: scsi device struct
4123 * @block_device: block device pointer
4124 * @capacity: capacity of the device
4125 * @parm: Array containing returned HSC values.
4126 *
4127 * This function generates the HSC parms that fdisk uses.
4128 * We want to make sure we return something that places partitions
4129 * on 4k boundaries for best performance with the IOA.
4130 *
4131 * Return value:
4132 * 0 on success
4133 **/
4134static int ipr_biosparam(struct scsi_device *sdev,
4135 struct block_device *block_device,
4136 sector_t capacity, int *parm)
4137{
4138 int heads, sectors;
4139 sector_t cylinders;
4140
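	/* 128 heads * 32 sectors/track = 4096 sectors (2MB) per cylinder, so
	 * cylinder-aligned partitions land on 4k boundaries. */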
4141 heads = 128;
4142 sectors = 32;
4143
4144 cylinders = capacity;
4145 sector_div(cylinders, (128 * 32));
4146
4147 /* return result */
4148 parm[0] = heads;
4149 parm[1] = sectors;
4150 parm[2] = cylinders;
4151
4152 return 0;
4153}
4154
4155/**
Brian King35a39692006-09-25 12:39:20 -05004156 * ipr_find_starget - Find target based on bus/target.
4157 * @starget: scsi target struct
4158 *
4159 * Return value:
4160 * resource entry pointer if found / NULL if not found
4161 **/
4162static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4163{
4164 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4165 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4166 struct ipr_resource_entry *res;
4167
4168 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004169 if ((res->bus == starget->channel) &&
4170 (res->target == starget->id) &&
4171 (res->lun == 0)) {
Brian King35a39692006-09-25 12:39:20 -05004172 return res;
4173 }
4174 }
4175
4176 return NULL;
4177}
4178
4179static struct ata_port_info sata_port_info;
4180
4181/**
4182 * ipr_target_alloc - Prepare for commands to a SCSI target
4183 * @starget: scsi target struct
4184 *
4185 * If the device is a SATA device, this function allocates an
4186 * ATA port with libata, else it does nothing.
4187 *
4188 * Return value:
4189 * 0 on success / non-0 on failure
4190 **/
4191static int ipr_target_alloc(struct scsi_target *starget)
4192{
4193 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4194 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4195 struct ipr_sata_port *sata_port;
4196 struct ata_port *ap;
4197 struct ipr_resource_entry *res;
4198 unsigned long lock_flags;
4199
4200 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4201 res = ipr_find_starget(starget);
4202 starget->hostdata = NULL;
4203
4204 if (res && ipr_is_gata(res)) {
4205 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4206 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4207 if (!sata_port)
4208 return -ENOMEM;
4209
4210 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4211 if (ap) {
4212 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4213 sata_port->ioa_cfg = ioa_cfg;
4214 sata_port->ap = ap;
4215 sata_port->res = res;
4216
4217 res->sata_port = sata_port;
4218 ap->private_data = sata_port;
4219 starget->hostdata = sata_port;
4220 } else {
4221 kfree(sata_port);
4222 return -ENOMEM;
4223 }
4224 }
4225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4226
4227 return 0;
4228}
4229
4230/**
4231 * ipr_target_destroy - Destroy a SCSI target
4232 * @starget: scsi target struct
4233 *
4234 * If the device was a SATA device, this function frees the libata
4235 * ATA port, else it does nothing.
4236 *
4237 **/
4238static void ipr_target_destroy(struct scsi_target *starget)
4239{
4240 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004241 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4242 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4243
4244 if (ioa_cfg->sis64) {
4245 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4246 clear_bit(starget->id, ioa_cfg->array_ids);
4247 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4248 clear_bit(starget->id, ioa_cfg->vset_ids);
4249 else if (starget->channel == 0)
4250 clear_bit(starget->id, ioa_cfg->target_ids);
4251 }
Brian King35a39692006-09-25 12:39:20 -05004252
4253 if (sata_port) {
4254 starget->hostdata = NULL;
4255 ata_sas_port_destroy(sata_port->ap);
4256 kfree(sata_port);
4257 }
4258}
4259
4260/**
4261 * ipr_find_sdev - Find device based on bus/target/lun.
4262 * @sdev: scsi device struct
4263 *
4264 * Return value:
4265 * resource entry pointer if found / NULL if not found
4266 **/
4267static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4268{
4269 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4270 struct ipr_resource_entry *res;
4271
4272 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004273 if ((res->bus == sdev->channel) &&
4274 (res->target == sdev->id) &&
4275 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004276 return res;
4277 }
4278
4279 return NULL;
4280}
4281
4282/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 * ipr_slave_destroy - Unconfigure a SCSI device
4284 * @sdev: scsi device struct
4285 *
4286 * Return value:
4287 * nothing
4288 **/
4289static void ipr_slave_destroy(struct scsi_device *sdev)
4290{
4291 struct ipr_resource_entry *res;
4292 struct ipr_ioa_cfg *ioa_cfg;
4293 unsigned long lock_flags = 0;
4294
4295 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4296
4297 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4298 res = (struct ipr_resource_entry *) sdev->hostdata;
4299 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004300 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004301 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 sdev->hostdata = NULL;
4303 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004304 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305 }
4306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4307}
4308
4309/**
4310 * ipr_slave_configure - Configure a SCSI device
4311 * @sdev: scsi device struct
4312 *
4313 * This function configures the specified scsi device.
4314 *
4315 * Return value:
4316 * 0 on success
4317 **/
4318static int ipr_slave_configure(struct scsi_device *sdev)
4319{
4320 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4321 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004322 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004324 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325
4326 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4327 res = sdev->hostdata;
4328 if (res) {
4329 if (ipr_is_af_dasd_device(res))
4330 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004331 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004333 sdev->no_uld_attach = 1;
4334 }
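		/* Volume sets get a dedicated request timeout and maximum transfer size */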
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335 if (ipr_is_vset_device(res)) {
Jens Axboe242f9dc2008-09-14 05:55:09 -07004336 blk_queue_rq_timeout(sdev->request_queue,
4337 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004338 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339 }
Brian Kinge4fbf442006-03-29 09:37:22 -06004340 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341 sdev->allow_restart = 1;
Brian Kingdd406ef2009-04-22 08:58:02 -05004342 if (ipr_is_gata(res) && res->sata_port)
4343 ap = res->sata_port->ap;
4344 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4345
4346 if (ap) {
Brian King35a39692006-09-25 12:39:20 -05004347 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004348 ata_sas_slave_configure(sdev, ap);
4349 } else
Brian King35a39692006-09-25 12:39:20 -05004350 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004351 if (ioa_cfg->sis64)
4352 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4353 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
Brian Kingdd406ef2009-04-22 08:58:02 -05004354 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 }
4356 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4357 return 0;
4358}
4359
4360/**
Brian King35a39692006-09-25 12:39:20 -05004361 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4362 * @sdev: scsi device struct
4363 *
4364 * This function initializes an ATA port so that future commands
4365 * sent through queuecommand will work.
4366 *
4367 * Return value:
4368 * 0 on success
4369 **/
4370static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4371{
4372 struct ipr_sata_port *sata_port = NULL;
4373 int rc = -ENXIO;
4374
4375 ENTER;
4376 if (sdev->sdev_target)
4377 sata_port = sdev->sdev_target->hostdata;
4378 if (sata_port)
4379 rc = ata_sas_port_init(sata_port->ap);
4380 if (rc)
4381 ipr_slave_destroy(sdev);
4382
4383 LEAVE;
4384 return rc;
4385}
4386
4387/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388 * ipr_slave_alloc - Prepare for commands to a device.
4389 * @sdev: scsi device struct
4390 *
4391 * This function saves a pointer to the resource entry
4392 * in the scsi device struct if the device exists. We
4393 * can then use this pointer in ipr_queuecommand when
4394 * handling new commands.
4395 *
4396 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004397 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004398 **/
4399static int ipr_slave_alloc(struct scsi_device *sdev)
4400{
4401 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4402 struct ipr_resource_entry *res;
4403 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004404 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405
4406 sdev->hostdata = NULL;
4407
4408 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4409
Brian King35a39692006-09-25 12:39:20 -05004410 res = ipr_find_sdev(sdev);
4411 if (res) {
4412 res->sdev = sdev;
4413 res->add_to_ml = 0;
4414 res->in_erp = 0;
4415 sdev->hostdata = res;
4416 if (!ipr_is_naca_model(res))
4417 res->needs_sync_complete = 1;
4418 rc = 0;
4419 if (ipr_is_gata(res)) {
4420 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4421 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422 }
4423 }
4424
4425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4426
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004427 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004428}
4429
4430/**
4431 * ipr_eh_host_reset - Reset the host adapter
4432 * @scsi_cmd: scsi command struct
4433 *
4434 * Return value:
4435 * SUCCESS / FAILED
4436 **/
Jeff Garzik df0ae242005-05-28 07:57:14 -04004437static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438{
4439 struct ipr_ioa_cfg *ioa_cfg;
4440 int rc;
4441
4442 ENTER;
4443 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4444
4445 dev_err(&ioa_cfg->pdev->dev,
4446 "Adapter being reset as a result of error recovery.\n");
4447
4448 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4449 ioa_cfg->sdt_state = GET_DUMP;
4450
4451 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4452
4453 LEAVE;
4454 return rc;
4455}
4456
Jeff Garzik df0ae242005-05-28 07:57:14 -04004457static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4458{
4459 int rc;
4460
4461 spin_lock_irq(cmd->device->host->host_lock);
4462 rc = __ipr_eh_host_reset(cmd);
4463 spin_unlock_irq(cmd->device->host->host_lock);
4464
4465 return rc;
4466}
4467
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468/**
Brian Kingc6513092006-03-29 09:37:43 -06004469 * ipr_device_reset - Reset the device
4470 * @ioa_cfg: ioa config struct
4471 * @res: resource entry struct
4472 *
4473 * This function issues a device reset to the affected device.
4474 * If the device is a SCSI device, a LUN reset will be sent
4475 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05004476 * will be sent. If the device is a SATA device, a PHY reset will
4477 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06004478 *
4479 * Return value:
4480 * 0 on success / non-zero on failure
4481 **/
4482static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4483 struct ipr_resource_entry *res)
4484{
4485 struct ipr_cmnd *ipr_cmd;
4486 struct ipr_ioarcb *ioarcb;
4487 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05004488 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06004489 u32 ioasc;
4490
4491 ENTER;
4492 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4493 ioarcb = &ipr_cmd->ioarcb;
4494 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08004495
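	/*
	 * SIS-64 adapters keep the ATA register block in the ata_ioadl area of
	 * the command block and locate it via add_cmd_parms_offset; older
	 * adapters carry it inline in the IOARCB's add_data union.
	 */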
4496 if (ipr_cmd->ioa_cfg->sis64) {
4497 regs = &ipr_cmd->i.ata_ioadl.regs;
4498 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4499 } else
4500 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06004501
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004502 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06004503 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4504 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05004505 if (ipr_is_gata(res)) {
4506 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08004507 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05004508 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4509 }
Brian Kingc6513092006-03-29 09:37:43 -06004510
4511 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4512 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4513 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
Brian King35a39692006-09-25 12:39:20 -05004514 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
4515 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
4516 sizeof(struct ipr_ioasa_gata));
Brian Kingc6513092006-03-29 09:37:43 -06004517
4518 LEAVE;
4519 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4520}
4521
4522/**
Brian King35a39692006-09-25 12:39:20 -05004523 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09004524 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05004525 * @classes: class of the attached device
4526 *
Tejun Heocc0680a2007-08-06 18:36:23 +09004527 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05004528 *
4529 * Return value:
4530 * 0 on success / non-zero on failure
4531 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09004532static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07004533 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05004534{
Tejun Heocc0680a2007-08-06 18:36:23 +09004535 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05004536 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4537 struct ipr_resource_entry *res;
4538 unsigned long lock_flags = 0;
4539 int rc = -ENXIO;
4540
4541 ENTER;
4542 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King73d98ff2006-11-21 10:27:58 -06004543 while (ioa_cfg->in_reset_reload) {
4544 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4545 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4546 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4547 }
4548
Brian King35a39692006-09-25 12:39:20 -05004549 res = sata_port->res;
4550 if (res) {
4551 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004552 *classes = res->ata_class;
Brian King35a39692006-09-25 12:39:20 -05004553 }
4554
4555 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4556 LEAVE;
4557 return rc;
4558}
4559
4560/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561 * ipr_eh_dev_reset - Reset the device
4562 * @scsi_cmd: scsi command struct
4563 *
4564 * This function issues a device reset to the affected device.
4565 * A LUN reset will be sent to the device first. If that does
4566 * not work, a target reset will be sent.
4567 *
4568 * Return value:
4569 * SUCCESS / FAILED
4570 **/
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04004571static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004572{
4573 struct ipr_cmnd *ipr_cmd;
4574 struct ipr_ioa_cfg *ioa_cfg;
4575 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05004576 struct ata_port *ap;
4577 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578
4579 ENTER;
4580 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4581 res = scsi_cmd->device->hostdata;
4582
brking@us.ibm.comeeb883072005-11-01 17:02:29 -06004583 if (!res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584 return FAILED;
4585
4586 /*
4587 * If we are currently going through reset/reload, return failed. This will force the
4588 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4589 * reset to complete
4590 */
4591 if (ioa_cfg->in_reset_reload)
4592 return FAILED;
4593 if (ioa_cfg->ioa_is_dead)
4594 return FAILED;
4595
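	/*
	 * Any ops still outstanding to this device will be failed back by the
	 * reset, so route their completion through the eh done handlers and
	 * mark failed ATA qcs so libata's error handler sees them.
	 */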
4596 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004597 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598 if (ipr_cmd->scsi_cmd)
4599 ipr_cmd->done = ipr_scsi_eh_done;
Brian King24d6f2b2007-03-29 12:43:17 -05004600 if (ipr_cmd->qc)
4601 ipr_cmd->done = ipr_sata_eh_done;
Brian King7402ece2006-11-21 10:28:23 -06004602 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4603 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4604 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4605 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606 }
4607 }
4608
4609 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06004610 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05004611
4612 if (ipr_is_gata(res) && res->sata_port) {
4613 ap = res->sata_port->ap;
4614 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09004615 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05004616 spin_lock_irq(scsi_cmd->device->host->host_lock);
Brian King5af23d22007-05-09 15:36:35 -05004617
4618 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004619 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
Brian King5af23d22007-05-09 15:36:35 -05004620 rc = -EIO;
4621 break;
4622 }
4623 }
Brian King35a39692006-09-25 12:39:20 -05004624 } else
4625 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004626 res->resetting_device = 0;
4627
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628 LEAVE;
Brian Kingc6513092006-03-29 09:37:43 -06004629 return (rc ? FAILED : SUCCESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630}
4631
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04004632static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4633{
4634 int rc;
4635
4636 spin_lock_irq(cmd->device->host->host_lock);
4637 rc = __ipr_eh_dev_reset(cmd);
4638 spin_unlock_irq(cmd->device->host->host_lock);
4639
4640 return rc;
4641}
4642
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643/**
4644 * ipr_bus_reset_done - Op done function for bus reset.
4645 * @ipr_cmd: ipr command struct
4646 *
4647 * This function is the op done function for a bus reset
4648 *
4649 * Return value:
4650 * none
4651 **/
4652static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4653{
4654 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4655 struct ipr_resource_entry *res;
4656
4657 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004658 if (!ioa_cfg->sis64)
4659 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4660 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4661 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4662 break;
4663 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665
4666 /*
4667 * If abort has not completed, indicate the reset has, else call the
4668 * abort's done function to wake the sleeping eh thread
4669 */
4670 if (ipr_cmd->sibling->sibling)
4671 ipr_cmd->sibling->sibling = NULL;
4672 else
4673 ipr_cmd->sibling->done(ipr_cmd->sibling);
4674
4675 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4676 LEAVE;
4677}
4678
4679/**
4680 * ipr_abort_timeout - An abort task has timed out
4681 * @ipr_cmd: ipr command struct
4682 *
4683 * This function handles when an abort task times out. If this
4684 * happens we issue a bus reset since we have resources tied
4685 * up that must be freed before returning to the midlayer.
4686 *
4687 * Return value:
4688 * none
4689 **/
4690static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4691{
4692 struct ipr_cmnd *reset_cmd;
4693 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4694 struct ipr_cmd_pkt *cmd_pkt;
4695 unsigned long lock_flags = 0;
4696
4697 ENTER;
4698 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4699 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4700 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4701 return;
4702 }
4703
Brian Kingfb3ed3c2006-03-29 09:37:37 -06004704 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4706 ipr_cmd->sibling = reset_cmd;
4707 reset_cmd->sibling = ipr_cmd;
4708 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4709 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4710 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4711 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4712 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4713
4714 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4715 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4716 LEAVE;
4717}
4718
4719/**
4720 * ipr_cancel_op - Cancel specified op
4721 * @scsi_cmd: scsi command struct
4722 *
4723 * This function cancels specified op.
4724 *
4725 * Return value:
4726 * SUCCESS / FAILED
4727 **/
4728static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4729{
4730 struct ipr_cmnd *ipr_cmd;
4731 struct ipr_ioa_cfg *ioa_cfg;
4732 struct ipr_resource_entry *res;
4733 struct ipr_cmd_pkt *cmd_pkt;
4734 u32 ioasc;
4735 int op_found = 0;
4736
4737 ENTER;
4738 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4739 res = scsi_cmd->device->hostdata;
4740
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004741 /* If we are currently going through reset/reload, return failed.
4742 * This will force the mid-layer to call ipr_eh_host_reset,
4743 * which will then go to sleep and wait for the reset to complete
4744 */
4745 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4746 return FAILED;
Brian King04d97682006-11-21 10:28:04 -06004747 if (!res || !ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748 return FAILED;
4749
4750 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4751 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4752 ipr_cmd->done = ipr_scsi_eh_done;
4753 op_found = 1;
4754 break;
4755 }
4756 }
4757
4758 if (!op_found)
4759 return SUCCESS;
4760
4761 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004762 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4764 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4765 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4766 ipr_cmd->u.sdev = scsi_cmd->device;
4767
Brian Kingfb3ed3c2006-03-29 09:37:37 -06004768 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4769 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4771 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4772
4773 /*
4774 * If the abort task timed out and we sent a bus reset, we will get
4775 * one the following responses to the abort
4776 */
4777 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4778 ioasc = 0;
4779 ipr_trace;
4780 }
4781
4782 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06004783 if (!ipr_is_naca_model(res))
4784 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785
4786 LEAVE;
4787 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4788}
4789
4790/**
4791 * ipr_eh_abort - Abort a single op
4792 * @scsi_cmd: scsi command struct
4793 *
4794 * Return value:
4795 * SUCCESS / FAILED
4796 **/
4797static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4798{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004799 unsigned long flags;
4800 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801
4802 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004804 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4805 rc = ipr_cancel_op(scsi_cmd);
4806 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004807
4808 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004809 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004810}
4811
4812/**
4813 * ipr_handle_other_interrupt - Handle "other" interrupts
4814 * @ioa_cfg: ioa config struct
4815 * @int_reg: interrupt register
4816 *
4817 * Return value:
4818 * IRQ_NONE / IRQ_HANDLED
4819 **/
4820static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4821 volatile u32 int_reg)
4822{
4823 irqreturn_t rc = IRQ_HANDLED;
4824
4825 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4826 /* Mask the interrupt */
4827 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4828
4829 /* Clear the interrupt */
4830 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4831 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4832
4833 list_del(&ioa_cfg->reset_cmd->queue);
4834 del_timer(&ioa_cfg->reset_cmd->timer);
4835 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4836 } else {
4837 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4838 ioa_cfg->ioa_unit_checked = 1;
4839 else
4840 dev_err(&ioa_cfg->pdev->dev,
4841 "Permanent IOA failure. 0x%08X\n", int_reg);
4842
4843 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4844 ioa_cfg->sdt_state = GET_DUMP;
4845
4846 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4847 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4848 }
4849
4850 return rc;
4851}
4852
4853/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004854 * ipr_isr_eh - Interrupt service routine error handler
4855 * @ioa_cfg: ioa config struct
4856 * @msg: message to log
4857 *
4858 * Return value:
4859 * none
4860 **/
4861static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4862{
4863 ioa_cfg->errors_logged++;
4864 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4865
4866 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4867 ioa_cfg->sdt_state = GET_DUMP;
4868
4869 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4870}
4871
4872/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873 * ipr_isr - Interrupt service routine
4874 * @irq: irq number
4875 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876 *
4877 * Return value:
4878 * IRQ_NONE / IRQ_HANDLED
4879 **/
David Howells7d12e782006-10-05 14:55:46 +01004880static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004881{
4882 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4883 unsigned long lock_flags = 0;
4884 volatile u32 int_reg, int_mask_reg;
4885 u32 ioasc;
4886 u16 cmd_index;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004887 int num_hrrq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004888 struct ipr_cmnd *ipr_cmd;
4889 irqreturn_t rc = IRQ_NONE;
4890
4891 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4892
4893 /* If interrupts are disabled, ignore the interrupt */
4894 if (!ioa_cfg->allow_interrupts) {
4895 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4896 return IRQ_NONE;
4897 }
4898
Wayne Boyer214777b2010-02-19 13:24:26 -08004899 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4900 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004901
Wayne Boyer214777b2010-02-19 13:24:26 -08004902 /* If an interrupt on the adapter did not occur, ignore it.
4903 * Or in the case of SIS 64, check for a stage change interrupt.
4904 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
Wayne Boyer214777b2010-02-19 13:24:26 -08004906 if (ioa_cfg->sis64) {
4907 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4908 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4909 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4910
4911 /* clear stage change */
4912 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4913 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4914 list_del(&ioa_cfg->reset_cmd->queue);
4915 del_timer(&ioa_cfg->reset_cmd->timer);
4916 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4917 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918 return IRQ_HANDLED;
4919 }
4920 }
4921
Linus Torvalds1da177e2005-04-16 15:20:36 -07004922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4923 return IRQ_NONE;
4924 }
4925
4926 while (1) {
4927 ipr_cmd = NULL;
4928
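		/*
		 * Each host RRQ entry carries a toggle bit that the adapter
		 * flips on every pass through the circular queue; an entry is
		 * only new if its toggle bit matches the value the driver
		 * expects, which is inverted below whenever hrrq_curr wraps.
		 */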
4929 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4930 ioa_cfg->toggle_bit) {
4931
4932 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4933 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4934
4935 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004936 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004937 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4938 return IRQ_HANDLED;
4939 }
4940
4941 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4942
4943 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4944
4945 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4946
4947 list_del(&ipr_cmd->queue);
4948 del_timer(&ipr_cmd->timer);
4949 ipr_cmd->done(ipr_cmd);
4950
4951 rc = IRQ_HANDLED;
4952
4953 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4954 ioa_cfg->hrrq_curr++;
4955 } else {
4956 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4957 ioa_cfg->toggle_bit ^= 1u;
4958 }
4959 }
4960
4961 if (ipr_cmd != NULL) {
4962 /* Clear the PCI interrupt */
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004963 do {
Wayne Boyer214777b2010-02-19 13:24:26 -08004964 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4965 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004966 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4967 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4968
4969 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4970 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4971 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4972 return IRQ_HANDLED;
4973 }
4974
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 } else
4976 break;
4977 }
4978
4979 if (unlikely(rc == IRQ_NONE))
4980 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4981
4982 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4983 return rc;
4984}
4985
4986/**
Wayne Boyera32c0552010-02-19 13:23:36 -08004987 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07004988 * @ioa_cfg: ioa config struct
4989 * @ipr_cmd: ipr command struct
4990 *
4991 * Return value:
4992 * 0 on success / -1 on failure
4993 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08004994static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4995 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004996{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09004997 int i, nseg;
4998 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999 u32 length;
5000 u32 ioadl_flags = 0;
5001 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5002 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005003 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005004
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005005 length = scsi_bufflen(scsi_cmd);
5006 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007 return 0;
5008
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005009 nseg = scsi_dma_map(scsi_cmd);
5010 if (nseg < 0) {
5011 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5012 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013 }
5014
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005015 ipr_cmd->dma_use_sg = nseg;
5016
Wayne Boyer438b0332010-05-10 09:13:00 -07005017 ioarcb->data_transfer_length = cpu_to_be32(length);
5018
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005019 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5020 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5021 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005022 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5023 ioadl_flags = IPR_IOADL_FLAGS_READ;
5024
5025 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5026 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5027 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5028 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5029 }
5030
5031 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5032 return 0;
5033}
5034
5035/**
5036 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5037 * @ioa_cfg: ioa config struct
5038 * @ipr_cmd: ipr command struct
5039 *
5040 * Return value:
5041 * 0 on success / -1 on failure
5042 **/
5043static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5044 struct ipr_cmnd *ipr_cmd)
5045{
5046 int i, nseg;
5047 struct scatterlist *sg;
5048 u32 length;
5049 u32 ioadl_flags = 0;
5050 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5051 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5052 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5053
5054 length = scsi_bufflen(scsi_cmd);
5055 if (!length)
5056 return 0;
5057
5058 nseg = scsi_dma_map(scsi_cmd);
5059 if (nseg < 0) {
5060 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5061 return -1;
5062 }
5063
5064 ipr_cmd->dma_use_sg = nseg;
5065
5066 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5067 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5068 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5069 ioarcb->data_transfer_length = cpu_to_be32(length);
5070 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005071 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5072 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5073 ioadl_flags = IPR_IOADL_FLAGS_READ;
5074 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5075 ioarcb->read_ioadl_len =
5076 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5077 }
5078
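	/*
	 * If the scatter/gather list fits in the spare space of the IOARCB
	 * itself, embed it there, avoiding a separate IOADL fetch by the
	 * adapter.
	 */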
Wayne Boyera32c0552010-02-19 13:23:36 -08005079 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5080 ioadl = ioarcb->u.add_data.u.ioadl;
5081 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5082 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005083 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5084 }
5085
5086 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5087 ioadl[i].flags_and_data_len =
5088 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5089 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5090 }
5091
5092 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5093 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005094}
5095
5096/**
5097 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5098 * @scsi_cmd: scsi command struct
5099 *
5100 * Return value:
5101 * task attributes
5102 **/
5103static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5104{
5105 u8 tag[2];
5106 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5107
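	/*
	 * Map the SPI queue tag message chosen by the midlayer onto the
	 * IOARCB task attribute flags; commands without a tag are sent as
	 * untagged tasks.
	 */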
5108 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5109 switch (tag[0]) {
5110 case MSG_SIMPLE_TAG:
5111 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5112 break;
5113 case MSG_HEAD_TAG:
5114 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5115 break;
5116 case MSG_ORDERED_TAG:
5117 rc = IPR_FLAGS_LO_ORDERED_TASK;
5118 break;
5119 }
5120 }
5121
5122 return rc;
5123}
5124
5125/**
5126 * ipr_erp_done - Process completion of ERP for a device
5127 * @ipr_cmd: ipr command struct
5128 *
5129 * This function copies the sense buffer into the scsi_cmd
5130 * struct and calls the scsi_done function.
5131 *
5132 * Return value:
5133 * nothing
5134 **/
5135static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5136{
5137 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5138 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5139 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5140 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5141
5142 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5143 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005144 scmd_printk(KERN_ERR, scsi_cmd,
5145 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146 } else {
5147 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5148 SCSI_SENSE_BUFFERSIZE);
5149 }
5150
5151 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005152 if (!ipr_is_naca_model(res))
5153 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154 res->in_erp = 0;
5155 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005156 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5158 scsi_cmd->scsi_done(scsi_cmd);
5159}
5160
5161/**
5162 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5163 * @ipr_cmd: ipr command struct
5164 *
5165 * Return value:
5166 * none
5167 **/
5168static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5169{
Brian King51b1c7e2007-03-29 12:43:50 -05005170 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5171 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08005172 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173
5174 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08005175 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005177 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178 ioarcb->read_ioadl_len = 0;
5179 ioasa->ioasc = 0;
5180 ioasa->residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005181
5182 if (ipr_cmd->ioa_cfg->sis64)
5183 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5184 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5185 else {
5186 ioarcb->write_ioadl_addr =
5187 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5188 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5189 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190}
5191
5192/**
5193 * ipr_erp_request_sense - Send request sense to a device
5194 * @ipr_cmd: ipr command struct
5195 *
5196 * This function sends a request sense to a device as a result
5197 * of a check condition.
5198 *
5199 * Return value:
5200 * nothing
5201 **/
5202static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5203{
5204 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5205 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5206
5207 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5208 ipr_erp_done(ipr_cmd);
5209 return;
5210 }
5211
5212 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5213
5214 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5215 cmd_pkt->cdb[0] = REQUEST_SENSE;
5216 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5217 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5218 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5219 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5220
Wayne Boyera32c0552010-02-19 13:23:36 -08005221 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5222 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223
5224 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5225 IPR_REQUEST_SENSE_TIMEOUT * 2);
5226}
5227
5228/**
5229 * ipr_erp_cancel_all - Send cancel all to a device
5230 * @ipr_cmd: ipr command struct
5231 *
5232 * This function sends a cancel all to a device to clear the
5233 * queue. If we are running TCQ on the device, QERR is set to 1,
5234 * which means all outstanding ops have been dropped on the floor.
5235 * Cancel all will return them to us.
5236 *
5237 * Return value:
5238 * nothing
5239 **/
5240static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5241{
5242 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5243 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5244 struct ipr_cmd_pkt *cmd_pkt;
5245
5246 res->in_erp = 1;
5247
5248 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5249
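	/*
	 * Untagged devices have no queued ops to flush, so skip the cancel
	 * all and go straight to the request sense.
	 */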
5250 if (!scsi_get_tag_type(scsi_cmd->device)) {
5251 ipr_erp_request_sense(ipr_cmd);
5252 return;
5253 }
5254
5255 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5256 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5257 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5258
5259 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5260 IPR_CANCEL_ALL_TIMEOUT);
5261}
5262
5263/**
5264 * ipr_dump_ioasa - Dump contents of IOASA
5265 * @ioa_cfg: ioa config struct
5266 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06005267 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268 *
5269 * This function is invoked by the interrupt handler when ops
5270 * fail. It will log the IOASA if appropriate. Only called
5271 * for GPDD ops.
5272 *
5273 * Return value:
5274 * none
5275 **/
5276static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06005277 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005278{
5279 int i;
5280 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05005281 u32 ioasc, fd_ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5283 __be32 *ioasa_data = (__be32 *)ioasa;
5284 int error_index;
5285
5286 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
Brian Kingb0692dd2007-03-29 12:43:09 -05005287 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288
5289 if (0 == ioasc)
5290 return;
5291
5292 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5293 return;
5294
Brian Kingb0692dd2007-03-29 12:43:09 -05005295 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5296 error_index = ipr_get_error(fd_ioasc);
5297 else
5298 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299
5300 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5301 /* Don't log an error if the IOA already logged one */
5302 if (ioasa->ilid != 0)
5303 return;
5304
Brian Kingcc9bd5d2007-03-29 12:43:01 -05005305 if (!ipr_is_gscsi(res))
5306 return;
5307
Linus Torvalds1da177e2005-04-16 15:20:36 -07005308 if (ipr_error_table[error_index].log_ioasa == 0)
5309 return;
5310 }
5311
Brian Kingfe964d02006-03-29 09:37:29 -06005312 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313
5314 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
5315 data_len = sizeof(struct ipr_ioasa);
5316 else
5317 data_len = be16_to_cpu(ioasa->ret_stat_len);
5318
5319 ipr_err("IOASA Dump:\n");
5320
5321 for (i = 0; i < data_len / 4; i += 4) {
5322 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5323 be32_to_cpu(ioasa_data[i]),
5324 be32_to_cpu(ioasa_data[i+1]),
5325 be32_to_cpu(ioasa_data[i+2]),
5326 be32_to_cpu(ioasa_data[i+3]));
5327 }
5328}
5329
5330/**
5331 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5332 * @ipr_cmd: ipr command struct
5334 *
5335 * Return value:
5336 * none
5337 **/
5338static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5339{
5340 u32 failing_lba;
5341 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5342 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5343 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5344 u32 ioasc = be32_to_cpu(ioasa->ioasc);
5345
5346 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5347
5348 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5349 return;
5350
5351 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5352
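	/*
	 * Use descriptor format sense data (response code 0x72) when the
	 * failing LBA needs more than 32 bits; otherwise build traditional
	 * fixed format sense data (response code 0x70).
	 */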
5353 if (ipr_is_vset_device(res) &&
5354 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5355 ioasa->u.vset.failing_lba_hi != 0) {
5356 sense_buf[0] = 0x72;
5357 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5358 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5359 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5360
5361 sense_buf[7] = 12;
5362 sense_buf[8] = 0;
5363 sense_buf[9] = 0x0A;
5364 sense_buf[10] = 0x80;
5365
5366 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5367
5368 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5369 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5370 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5371 sense_buf[15] = failing_lba & 0x000000ff;
5372
5373 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5374
5375 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5376 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5377 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5378 sense_buf[19] = failing_lba & 0x000000ff;
5379 } else {
5380 sense_buf[0] = 0x70;
5381 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5382 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5383 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5384
5385 /* Illegal request */
5386 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5387 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5388 sense_buf[7] = 10; /* additional length */
5389
5390 /* IOARCB was in error */
5391 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5392 sense_buf[15] = 0xC0;
5393 else /* Parameter data was invalid */
5394 sense_buf[15] = 0x80;
5395
5396 sense_buf[16] =
5397 ((IPR_FIELD_POINTER_MASK &
5398 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
5399 sense_buf[17] =
5400 (IPR_FIELD_POINTER_MASK &
5401 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
5402 } else {
5403 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5404 if (ipr_is_vset_device(res))
5405 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5406 else
5407 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5408
5409 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5410 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5411 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5412 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5413 sense_buf[6] = failing_lba & 0x000000ff;
5414 }
5415
5416 sense_buf[7] = 6; /* additional length */
5417 }
5418 }
5419}
5420
5421/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005422 * ipr_get_autosense - Copy autosense data to sense buffer
5423 * @ipr_cmd: ipr command struct
5424 *
5425 * This function copies the autosense buffer to the buffer
5426 * in the scsi_cmd, if there is autosense available.
5427 *
5428 * Return value:
5429 * 1 if autosense was available / 0 if not
5430 **/
5431static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5432{
5433 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5434
Brian King117d2ce2006-08-02 14:57:58 -05005435 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005436 return 0;
5437
5438 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5439 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5440 SCSI_SENSE_BUFFERSIZE));
5441 return 1;
5442}
5443
5444/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445 * ipr_erp_start - Process an error response for a SCSI op
5446 * @ioa_cfg: ioa config struct
5447 * @ipr_cmd: ipr command struct
5448 *
5449 * This function determines whether or not to initiate ERP
5450 * on the affected device.
5451 *
5452 * Return value:
5453 * nothing
5454 **/
5455static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5456 struct ipr_cmnd *ipr_cmd)
5457{
5458 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5459 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5460 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
Brian King8a048992007-04-26 16:00:10 -05005461 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005462
5463 if (!res) {
5464 ipr_scsi_eh_done(ipr_cmd);
5465 return;
5466 }
5467
Brian King8a048992007-04-26 16:00:10 -05005468 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469 ipr_gen_sense(ipr_cmd);
5470
Brian Kingcc9bd5d2007-03-29 12:43:01 -05005471 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5472
Brian King8a048992007-04-26 16:00:10 -05005473 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005474 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005475 if (ipr_is_naca_model(res))
5476 scsi_cmd->result |= (DID_ABORT << 16);
5477 else
5478 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005479 break;
5480 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06005481 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005482 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5483 break;
5484 case IPR_IOASC_HW_SEL_TIMEOUT:
5485 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005486 if (!ipr_is_naca_model(res))
5487 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005488 break;
5489 case IPR_IOASC_SYNC_REQUIRED:
5490 if (!res->in_erp)
5491 res->needs_sync_complete = 1;
5492 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5493 break;
5494 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06005495 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005496 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5497 break;
5498 case IPR_IOASC_BUS_WAS_RESET:
5499 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5500 /*
5501 * Report the bus reset and ask for a retry. The device
5502 * will give CC/UA the next command.
5503 */
5504 if (!res->resetting_device)
5505 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5506 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005507 if (!ipr_is_naca_model(res))
5508 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005509 break;
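	/*
	 * A check condition without valid autosense means the sense data is
	 * still held by the device; for non-NACA devices start ERP (cancel
	 * all, then request sense) to retrieve it.
	 */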
5510 case IPR_IOASC_HW_DEV_BUS_STATUS:
5511 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5512 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005513 if (!ipr_get_autosense(ipr_cmd)) {
5514 if (!ipr_is_naca_model(res)) {
5515 ipr_erp_cancel_all(ipr_cmd);
5516 return;
5517 }
5518 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005519 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005520 if (!ipr_is_naca_model(res))
5521 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005522 break;
5523 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5524 break;
5525 default:
Brian King5b7304f2006-08-02 14:57:51 -05005526 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5527 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005528 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005529 res->needs_sync_complete = 1;
5530 break;
5531 }
5532
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005533 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005534 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5535 scsi_cmd->scsi_done(scsi_cmd);
5536}
5537
5538/**
5539 * ipr_scsi_done - mid-layer done function
5540 * @ipr_cmd: ipr command struct
5541 *
5542 * This function is invoked by the interrupt handler for
5543 * ops generated by the SCSI mid-layer
5544 *
5545 * Return value:
5546 * none
5547 **/
5548static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5549{
5550 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5551 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5552 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5553
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005554 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005555
5556 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005557 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005558 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5559 scsi_cmd->scsi_done(scsi_cmd);
5560 } else
5561 ipr_erp_start(ioa_cfg, ipr_cmd);
5562}
5563
5564/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565 * ipr_queuecommand - Queue a mid-layer request
5566 * @scsi_cmd: scsi command struct
5567 * @done: done function
5568 *
5569 * This function queues a request generated by the mid-layer.
5570 *
5571 * Return value:
5572 * 0 on success
5573 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5574 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5575 **/
5576static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5577 void (*done) (struct scsi_cmnd *))
5578{
5579 struct ipr_ioa_cfg *ioa_cfg;
5580 struct ipr_resource_entry *res;
5581 struct ipr_ioarcb *ioarcb;
5582 struct ipr_cmnd *ipr_cmd;
5583 int rc = 0;
5584
5585 scsi_cmd->scsi_done = done;
5586 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5587 res = scsi_cmd->device->hostdata;
5588 scsi_cmd->result = (DID_OK << 16);
5589
5590 /*
5591 * We are currently blocking all devices due to a host reset
5592 * We have told the host to stop giving us new requests, but
5593 * ERP ops don't count. FIXME
5594 */
5595 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5596 return SCSI_MLQUEUE_HOST_BUSY;
5597
5598 /*
5599 * FIXME - Create scsi_set_host_offline interface
5600 * and the ioa_is_dead check can be removed
5601 */
5602 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5603 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5604 scsi_cmd->result = (DID_NO_CONNECT << 16);
5605 scsi_cmd->scsi_done(scsi_cmd);
5606 return 0;
5607 }
5608
Brian King35a39692006-09-25 12:39:20 -05005609 if (ipr_is_gata(res) && res->sata_port)
5610 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5611
Linus Torvalds1da177e2005-04-16 15:20:36 -07005612 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5613 ioarcb = &ipr_cmd->ioarcb;
5614 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5615
5616 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5617 ipr_cmd->scsi_cmd = scsi_cmd;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005618 ioarcb->res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005619 ipr_cmd->done = ipr_scsi_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005620 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005621
5622 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5623 if (scsi_cmd->underflow == 0)
5624 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5625
5626 if (res->needs_sync_complete) {
5627 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5628 res->needs_sync_complete = 0;
5629 }
5630
5631 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5632 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5633 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5634 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5635 }
5636
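	/*
	 * Vendor specific CDBs (opcode 0xC0 and above) destined for
	 * adapter-managed devices, as well as IPR_QUERY_RSRC_STATE, are
	 * flagged as IOA commands rather than pass-through SCSI CDBs.
	 */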
5637 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5638 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5639 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5640
Wayne Boyera32c0552010-02-19 13:23:36 -08005641 if (likely(rc == 0)) {
5642 if (ioa_cfg->sis64)
5643 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5644 else
5645 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5646 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647
5648 if (likely(rc == 0)) {
5649 mb();
Wayne Boyera32c0552010-02-19 13:23:36 -08005650 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005651 } else {
5652 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5653 return SCSI_MLQUEUE_HOST_BUSY;
5654 }
5655
5656 return 0;
5657}
5658
5659/**
Brian King35a39692006-09-25 12:39:20 -05005660 * ipr_ioctl - IOCTL handler
5661 * @sdev: scsi device struct
5662 * @cmd: IOCTL cmd
5663 * @arg: IOCTL arg
5664 *
5665 * Return value:
5666 * 0 on success / other on failure
5667 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06005668static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05005669{
5670 struct ipr_resource_entry *res;
5671
5672 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05005673 if (res && ipr_is_gata(res)) {
5674 if (cmd == HDIO_GET_IDENTITY)
5675 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05005676 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05005677 }
Brian King35a39692006-09-25 12:39:20 -05005678
5679 return -EINVAL;
5680}
5681
5682/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005683 * ipr_ioa_info - Get information about the card/driver
5684 * @host: scsi host struct
5685 *
5686 * Return value:
5687 * pointer to buffer with description string
5688 **/
5689static const char * ipr_ioa_info(struct Scsi_Host *host)
5690{
5691 static char buffer[512];
5692 struct ipr_ioa_cfg *ioa_cfg;
5693 unsigned long lock_flags = 0;
5694
5695 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5696
5697 spin_lock_irqsave(host->host_lock, lock_flags);
5698 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5699 spin_unlock_irqrestore(host->host_lock, lock_flags);
5700
5701 return buffer;
5702}
5703
5704static struct scsi_host_template driver_template = {
5705 .module = THIS_MODULE,
5706 .name = "IPR",
5707 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05005708 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005709 .queuecommand = ipr_queuecommand,
5710 .eh_abort_handler = ipr_eh_abort,
5711 .eh_device_reset_handler = ipr_eh_dev_reset,
5712 .eh_host_reset_handler = ipr_eh_host_reset,
5713 .slave_alloc = ipr_slave_alloc,
5714 .slave_configure = ipr_slave_configure,
5715 .slave_destroy = ipr_slave_destroy,
Brian King35a39692006-09-25 12:39:20 -05005716 .target_alloc = ipr_target_alloc,
5717 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005718 .change_queue_depth = ipr_change_queue_depth,
5719 .change_queue_type = ipr_change_queue_type,
5720 .bios_param = ipr_biosparam,
5721 .can_queue = IPR_MAX_COMMANDS,
5722 .this_id = -1,
5723 .sg_tablesize = IPR_MAX_SGLIST,
5724 .max_sectors = IPR_IOA_MAX_SECTORS,
5725 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5726 .use_clustering = ENABLE_CLUSTERING,
5727 .shost_attrs = ipr_ioa_attrs,
5728 .sdev_attrs = ipr_dev_attrs,
5729 .proc_name = IPR_NAME
5730};
5731
Brian King35a39692006-09-25 12:39:20 -05005732/**
5733 * ipr_ata_phy_reset - libata phy_reset handler
5734 * @ap: ata port to reset
5735 *
5736 **/
5737static void ipr_ata_phy_reset(struct ata_port *ap)
5738{
5739 unsigned long flags;
5740 struct ipr_sata_port *sata_port = ap->private_data;
5741 struct ipr_resource_entry *res = sata_port->res;
5742 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5743 int rc;
5744
5745 ENTER;
5746 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5747 while (ioa_cfg->in_reset_reload) {
5748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5749 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5750 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5751 }
5752
5753 if (!ioa_cfg->allow_cmds)
5754 goto out_unlock;
5755
5756 rc = ipr_device_reset(ioa_cfg, res);
5757
5758 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02005759 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05005760 goto out_unlock;
5761 }
5762
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005763 ap->link.device[0].class = res->ata_class;
5764 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02005765 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05005766
5767out_unlock:
5768 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5769 LEAVE;
5770}
5771
5772/**
5773 * ipr_ata_post_internal - Cleanup after an internal command
5774 * @qc: ATA queued command
5775 *
5776 * Return value:
5777 * none
5778 **/
5779static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5780{
5781 struct ipr_sata_port *sata_port = qc->ap->private_data;
5782 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5783 struct ipr_cmnd *ipr_cmd;
5784 unsigned long flags;
5785
5786 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King73d98ff2006-11-21 10:27:58 -06005787 while (ioa_cfg->in_reset_reload) {
5788 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5789 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5790 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5791 }
5792
Brian King35a39692006-09-25 12:39:20 -05005793 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5794 if (ipr_cmd->qc == qc) {
5795 ipr_device_reset(ioa_cfg, sata_port->res);
5796 break;
5797 }
5798 }
5799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5800}
5801
5802/**
Brian King35a39692006-09-25 12:39:20 -05005803 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5804 * @regs: destination
5805 * @tf: source ATA taskfile
5806 *
5807 * Return value:
5808 * none
5809 **/
5810static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5811 struct ata_taskfile *tf)
5812{
5813 regs->feature = tf->feature;
5814 regs->nsect = tf->nsect;
5815 regs->lbal = tf->lbal;
5816 regs->lbam = tf->lbam;
5817 regs->lbah = tf->lbah;
5818 regs->device = tf->device;
5819 regs->command = tf->command;
5820 regs->hob_feature = tf->hob_feature;
5821 regs->hob_nsect = tf->hob_nsect;
5822 regs->hob_lbal = tf->hob_lbal;
5823 regs->hob_lbam = tf->hob_lbam;
5824 regs->hob_lbah = tf->hob_lbah;
5825 regs->ctl = tf->ctl;
5826}
5827
5828/**
5829 * ipr_sata_done - done function for SATA commands
5830 * @ipr_cmd: ipr command struct
5831 *
5832 * This function is invoked by the interrupt handler for
5833 * ops generated by the SCSI mid-layer to SATA devices
5834 *
5835 * Return value:
5836 * none
5837 **/
5838static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5839{
5840 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5841 struct ata_queued_cmd *qc = ipr_cmd->qc;
5842 struct ipr_sata_port *sata_port = qc->ap->private_data;
5843 struct ipr_resource_entry *res = sata_port->res;
5844 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5845
5846 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5847 sizeof(struct ipr_ioasa_gata));
5848 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5849
5850 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005851 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05005852
5853 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5854 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5855 else
5856 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5857 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5858 ata_qc_complete(qc);
5859}
5860
5861/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005862 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5863 * @ipr_cmd: ipr command struct
5864 * @qc: ATA queued command
5865 *
5866 **/
5867static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5868 struct ata_queued_cmd *qc)
5869{
5870 u32 ioadl_flags = 0;
5871 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5872 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5873 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5874 int len = qc->nbytes;
5875 struct scatterlist *sg;
5876 unsigned int si;
5877 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5878
5879 if (len == 0)
5880 return;
5881
5882 if (qc->dma_dir == DMA_TO_DEVICE) {
5883 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5884 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5885 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5886 ioadl_flags = IPR_IOADL_FLAGS_READ;
5887
5888 ioarcb->data_transfer_length = cpu_to_be32(len);
5889 ioarcb->ioadl_len =
5890 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5891 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5892 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5893
5894 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5895 ioadl64->flags = cpu_to_be32(ioadl_flags);
5896 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5897 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5898
5899 last_ioadl64 = ioadl64;
5900 ioadl64++;
5901 }
5902
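	/* Flag the final descriptor so the adapter knows where the chain ends */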
5903 if (likely(last_ioadl64))
5904 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5905}
5906
5907/**
Brian King35a39692006-09-25 12:39:20 -05005908 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5909 * @ipr_cmd: ipr command struct
5910 * @qc: ATA queued command
5911 *
5912 **/
5913static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5914 struct ata_queued_cmd *qc)
5915{
5916 u32 ioadl_flags = 0;
5917 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005918 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04005919 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01005920 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05005921 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09005922 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05005923
5924 if (len == 0)
5925 return;
5926
5927 if (qc->dma_dir == DMA_TO_DEVICE) {
5928 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5929 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005930 ioarcb->data_transfer_length = cpu_to_be32(len);
5931 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05005932 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5933 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5934 ioadl_flags = IPR_IOADL_FLAGS_READ;
5935 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5936 ioarcb->read_ioadl_len =
5937 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5938 }
5939
Tejun Heoff2aeb12007-12-05 16:43:11 +09005940 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05005941 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5942 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04005943
5944 last_ioadl = ioadl;
5945 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05005946 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04005947
5948 if (likely(last_ioadl))
5949 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05005950}
5951
5952/**
5953 * ipr_qc_issue - Issue a SATA qc to a device
5954 * @qc: queued command
5955 *
5956 * Return value:
5957 * 0 if success
5958 **/
5959static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5960{
5961 struct ata_port *ap = qc->ap;
5962 struct ipr_sata_port *sata_port = ap->private_data;
5963 struct ipr_resource_entry *res = sata_port->res;
5964 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5965 struct ipr_cmnd *ipr_cmd;
5966 struct ipr_ioarcb *ioarcb;
5967 struct ipr_ioarcb_ata_regs *regs;
5968
5969 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
Brian King0feeed82007-03-29 12:43:43 -05005970 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05005971
5972 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5973 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05005974
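	/*
	 * SIS-64 adapters keep the ATA register block next to the 64-bit
	 * ATA IOADL in the command block and locate it through
	 * add_cmd_parms_offset; older adapters embed it directly in the
	 * IOARCB's additional data area.
	 */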
Wayne Boyera32c0552010-02-19 13:23:36 -08005975 if (ioa_cfg->sis64) {
5976 regs = &ipr_cmd->i.ata_ioadl.regs;
5977 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5978 } else
5979 regs = &ioarcb->u.add_data.u.regs;
5980
5981 memset(regs, 0, sizeof(*regs));
5982 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05005983
5984 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5985 ipr_cmd->qc = qc;
5986 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005987 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05005988 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5989 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5990 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01005991 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05005992
Wayne Boyera32c0552010-02-19 13:23:36 -08005993 if (ioa_cfg->sis64)
5994 ipr_build_ata_ioadl64(ipr_cmd, qc);
5995 else
5996 ipr_build_ata_ioadl(ipr_cmd, qc);
5997
Brian King35a39692006-09-25 12:39:20 -05005998 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5999 ipr_copy_sata_tf(regs, &qc->tf);
6000 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006001 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05006002
6003 switch (qc->tf.protocol) {
6004 case ATA_PROT_NODATA:
6005 case ATA_PROT_PIO:
6006 break;
6007
6008 case ATA_PROT_DMA:
6009 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6010 break;
6011
Tejun Heo0dc36882007-12-18 16:34:43 -05006012 case ATAPI_PROT_PIO:
6013 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05006014 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6015 break;
6016
Tejun Heo0dc36882007-12-18 16:34:43 -05006017 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05006018 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6019 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6020 break;
6021
6022 default:
6023 WARN_ON(1);
Brian King0feeed82007-03-29 12:43:43 -05006024 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05006025 }
6026
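	/*
	 * Make sure all IOARCB and IOADL updates are visible in memory
	 * before the command is handed to the adapter.
	 */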
6027 mb();
Wayne Boyera32c0552010-02-19 13:23:36 -08006028
6029 ipr_send_command(ipr_cmd);
6030
Brian King35a39692006-09-25 12:39:20 -05006031 return 0;
6032}
6033
6034/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006035 * ipr_qc_fill_rtf - Read result TF
6036 * @qc: ATA queued command
6037 *
6038 * Return value:
6039 * true
6040 **/
6041static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6042{
6043 struct ipr_sata_port *sata_port = qc->ap->private_data;
6044 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6045 struct ata_taskfile *tf = &qc->result_tf;
6046
6047 tf->feature = g->error;
6048 tf->nsect = g->nsect;
6049 tf->lbal = g->lbal;
6050 tf->lbam = g->lbam;
6051 tf->lbah = g->lbah;
6052 tf->device = g->device;
6053 tf->command = g->status;
6054 tf->hob_nsect = g->hob_nsect;
6055 tf->hob_lbal = g->hob_lbal;
6056 tf->hob_lbam = g->hob_lbam;
6057 tf->hob_lbah = g->hob_lbah;
6058 tf->ctl = g->alt_status;
6059
6060 return true;
6061}
6062
Brian King35a39692006-09-25 12:39:20 -05006063static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05006064 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09006065 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05006066 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05006067 .qc_prep = ata_noop_qc_prep,
6068 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006069 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05006070 .port_start = ata_sas_port_start,
6071 .port_stop = ata_sas_port_stop
6072};
6073
6074static struct ata_port_info sata_port_info = {
6075 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6076 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6077 .pio_mask = 0x10, /* pio4 */
6078 .mwdma_mask = 0x07,
6079 .udma_mask = 0x7f, /* udma0-6 */
6080 .port_ops = &ipr_sata_ops
6081};
6082
Linus Torvalds1da177e2005-04-16 15:20:36 -07006083#ifdef CONFIG_PPC_PSERIES
6084static const u16 ipr_blocked_processors[] = {
6085 PV_NORTHSTAR,
6086 PV_PULSAR,
6087 PV_POWER4,
6088 PV_ICESTAR,
6089 PV_SSTAR,
6090 PV_POWER4p,
6091 PV_630,
6092 PV_630p
6093};
6094
6095/**
6096 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6097 * @ioa_cfg: ioa cfg struct
6098 *
6099 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6100 * certain pSeries hardware. This function determines if the given
6101 * adapter is in one of these configurations or not.
6102 *
6103 * Return value:
6104 * 1 if adapter is not supported / 0 if adapter is supported
6105 **/
6106static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6107{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006108 int i;
6109
Auke Kok44c10132007-06-08 15:46:36 -07006110 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6111 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6112 if (__is_processor(ipr_blocked_processors[i]))
6113 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006114 }
6115 }
6116 return 0;
6117}
6118#else
6119#define ipr_invalid_adapter(ioa_cfg) 0
6120#endif
6121
6122/**
6123 * ipr_ioa_bringdown_done - IOA bring down completion.
6124 * @ipr_cmd: ipr command struct
6125 *
6126 * This function processes the completion of an adapter bring down.
6127 * It wakes any reset sleepers.
6128 *
6129 * Return value:
6130 * IPR_RC_JOB_RETURN
6131 **/
6132static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6133{
6134 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6135
6136 ENTER;
6137 ioa_cfg->in_reset_reload = 0;
6138 ioa_cfg->reset_retries = 0;
6139 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6140 wake_up_all(&ioa_cfg->reset_wait_q);
6141
6142 spin_unlock_irq(ioa_cfg->host->host_lock);
6143 scsi_unblock_requests(ioa_cfg->host);
6144 spin_lock_irq(ioa_cfg->host->host_lock);
6145 LEAVE;
6146
6147 return IPR_RC_JOB_RETURN;
6148}
6149
6150/**
6151 * ipr_ioa_reset_done - IOA reset completion.
6152 * @ipr_cmd: ipr command struct
6153 *
6154 * This function processes the completion of an adapter reset.
6155 * It schedules any necessary mid-layer add/removes and
6156 * wakes any reset sleepers.
6157 *
6158 * Return value:
6159 * IPR_RC_JOB_RETURN
6160 **/
6161static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6162{
6163 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6164 struct ipr_resource_entry *res;
6165 struct ipr_hostrcb *hostrcb, *temp;
6166 int i = 0;
6167
6168 ENTER;
6169 ioa_cfg->in_reset_reload = 0;
6170 ioa_cfg->allow_cmds = 1;
6171 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06006172 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006173
6174 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6175 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6176 ipr_trace;
6177 break;
6178 }
6179 }
6180 schedule_work(&ioa_cfg->work_q);
6181
6182 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6183 list_del(&hostrcb->queue);
6184 if (i++ < IPR_NUM_LOG_HCAMS)
6185 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6186 else
6187 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6188 }
6189
Brian King6bb04172007-04-26 16:00:08 -05006190 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006191 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6192
6193 ioa_cfg->reset_retries = 0;
6194 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6195 wake_up_all(&ioa_cfg->reset_wait_q);
6196
Mark Nelson30237852008-12-10 12:23:20 +11006197 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11006199 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006200
6201 if (!ioa_cfg->allow_cmds)
6202 scsi_block_requests(ioa_cfg->host);
6203
6204 LEAVE;
6205 return IPR_RC_JOB_RETURN;
6206}
6207
6208/**
6209 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6210 * @supported_dev: supported device struct
6211 * @vpids: vendor product id struct
6212 *
6213 * Return value:
6214 * none
6215 **/
6216static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6217 struct ipr_std_inq_vpids *vpids)
6218{
6219 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6220 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6221 supported_dev->num_records = 1;
6222 supported_dev->data_length =
6223 cpu_to_be16(sizeof(struct ipr_supported_device));
6224 supported_dev->reserved = 0;
6225}
6226
6227/**
6228 * ipr_set_supported_devs - Send Set Supported Devices for a device
6229 * @ipr_cmd: ipr command struct
6230 *
Wayne Boyera32c0552010-02-19 13:23:36 -08006231 * This function sends a Set Supported Devices command to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07006232 *
6233 * Return value:
6234 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6235 **/
6236static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6237{
6238 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6239 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006240 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6241 struct ipr_resource_entry *res = ipr_cmd->u.res;
6242
6243 ipr_cmd->job_step = ipr_ioa_reset_done;
6244
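	/*
	 * Issue one Set Supported Devices per pass. On non-SIS64 adapters
	 * job_step is pointed back at this routine below so the remaining
	 * disks are handled on later passes; otherwise the next step is the
	 * ipr_ioa_reset_done set above.
	 */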
6245 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06006246 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006247 continue;
6248
6249 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006250 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006251
6252 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6253 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6254 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6255
6256 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006257 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006258 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6259 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6260
Wayne Boyera32c0552010-02-19 13:23:36 -08006261 ipr_init_ioadl(ipr_cmd,
6262 ioa_cfg->vpd_cbs_dma +
6263 offsetof(struct ipr_misc_cbs, supp_dev),
6264 sizeof(struct ipr_supported_device),
6265 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006266
6267 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6268 IPR_SET_SUP_DEVICE_TIMEOUT);
6269
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006270 if (!ioa_cfg->sis64)
6271 ipr_cmd->job_step = ipr_set_supported_devs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006272 return IPR_RC_JOB_RETURN;
6273 }
6274
6275 return IPR_RC_JOB_CONTINUE;
6276}
6277
6278/**
6279 * ipr_get_mode_page - Locate specified mode page
6280 * @mode_pages: mode page buffer
6281 * @page_code: page code to find
6282 * @len: minimum required length for mode page
6283 *
6284 * Return value:
6285 * pointer to mode page / NULL on failure
6286 **/
6287static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6288 u32 page_code, u32 len)
6289{
6290 struct ipr_mode_page_hdr *mode_hdr;
6291 u32 page_length;
6292 u32 length;
6293
6294 if (!mode_pages || (mode_pages->hdr.length == 0))
6295 return NULL;
6296
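	/*
	 * hdr.length excludes the length byte itself; add it back, then drop
	 * the rest of the 4-byte mode parameter header and any block
	 * descriptors to get the number of mode page bytes to walk.
	 */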
6297 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6298 mode_hdr = (struct ipr_mode_page_hdr *)
6299 (mode_pages->data + mode_pages->hdr.block_desc_len);
6300
6301 while (length) {
6302 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6303 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6304 return mode_hdr;
6305 break;
6306 } else {
6307 page_length = (sizeof(struct ipr_mode_page_hdr) +
6308 mode_hdr->page_length);
6309 length -= page_length;
6310 mode_hdr = (struct ipr_mode_page_hdr *)
6311 ((unsigned long)mode_hdr + page_length);
6312 }
6313 }
6314 return NULL;
6315}
6316
6317/**
6318 * ipr_check_term_power - Check for term power errors
6319 * @ioa_cfg: ioa config struct
6320 * @mode_pages: IOAFP mode pages buffer
6321 *
6322 * Check the IOAFP's mode page 28 for term power errors
6323 *
6324 * Return value:
6325 * nothing
6326 **/
6327static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6328 struct ipr_mode_pages *mode_pages)
6329{
6330 int i;
6331 int entry_length;
6332 struct ipr_dev_bus_entry *bus;
6333 struct ipr_mode_page28 *mode_page;
6334
6335 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6336 sizeof(struct ipr_mode_page28));
6337
6338 entry_length = mode_page->entry_length;
6339
6340 bus = mode_page->bus;
6341
6342 for (i = 0; i < mode_page->num_entries; i++) {
6343 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6344 dev_err(&ioa_cfg->pdev->dev,
6345 "Term power is absent on scsi bus %d\n",
6346 bus->res_addr.bus);
6347 }
6348
6349 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6350 }
6351}
6352
6353/**
6354 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6355 * @ioa_cfg: ioa config struct
6356 *
6357 * Looks through the config table checking for SES devices. If
6358 * the SES device is in the SES table indicating a maximum SCSI
6359 * bus speed, the speed is limited for the bus.
6360 *
6361 * Return value:
6362 * none
6363 **/
6364static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6365{
6366 u32 max_xfer_rate;
6367 int i;
6368
6369 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6370 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6371 ioa_cfg->bus_attr[i].bus_width);
6372
6373 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6374 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6375 }
6376}
6377
6378/**
6379 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6380 * @ioa_cfg: ioa config struct
6381 * @mode_pages: mode page 28 buffer
6382 *
6383 * Updates mode page 28 based on driver configuration
6384 *
6385 * Return value:
6386 * none
6387 **/
6388static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6389 struct ipr_mode_pages *mode_pages)
6390{
6391 int i, entry_length;
6392 struct ipr_dev_bus_entry *bus;
6393 struct ipr_bus_attributes *bus_attr;
6394 struct ipr_mode_page28 *mode_page;
6395
6396 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6397 sizeof(struct ipr_mode_page28));
6398
6399 entry_length = mode_page->entry_length;
6400
6401 /* Loop for each device bus entry */
6402 for (i = 0, bus = mode_page->bus;
6403 i < mode_page->num_entries;
6404 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6405 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6406 dev_err(&ioa_cfg->pdev->dev,
6407 "Invalid resource address reported: 0x%08X\n",
6408 IPR_GET_PHYS_LOC(bus->res_addr));
6409 continue;
6410 }
6411
6412 bus_attr = &ioa_cfg->bus_attr[i];
6413 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6414 bus->bus_width = bus_attr->bus_width;
6415 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6416 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6417 if (bus_attr->qas_enabled)
6418 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6419 else
6420 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6421 }
6422}
6423
6424/**
6425 * ipr_build_mode_select - Build a mode select command
6426 * @ipr_cmd: ipr command struct
6427 * @res_handle: resource handle to send command to
6428 * @parm: Byte 1 of the Mode Select command
6429 * @dma_addr: DMA buffer address
6430 * @xfer_len: data transfer length
6431 *
6432 * Return value:
6433 * none
6434 **/
6435static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08006436 __be32 res_handle, u8 parm,
6437 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006438{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006439 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6440
6441 ioarcb->res_handle = res_handle;
6442 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6443 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6444 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6445 ioarcb->cmd_pkt.cdb[1] = parm;
6446 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6447
Wayne Boyera32c0552010-02-19 13:23:36 -08006448 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006449}
6450
6451/**
6452 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6453 * @ipr_cmd: ipr command struct
6454 *
6455 * This function sets up the SCSI bus attributes and sends
6456 * a Mode Select for Page 28 to activate them.
6457 *
6458 * Return value:
6459 * IPR_RC_JOB_RETURN
6460 **/
6461static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6462{
6463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6464 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6465 int length;
6466
6467 ENTER;
Brian King47338042006-02-08 20:57:42 -06006468 ipr_scsi_bus_speed_limit(ioa_cfg);
6469 ipr_check_term_power(ioa_cfg, mode_pages);
6470 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
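	/*
	 * The mode data length field is reserved in MODE SELECT parameter
	 * data, so remember the real length for the CDB and then zero it
	 * in the buffer.
	 */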
6471 length = mode_pages->hdr.length + 1;
6472 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006473
6474 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6475 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6476 length);
6477
Wayne Boyerf72919e2010-02-19 13:24:21 -08006478 ipr_cmd->job_step = ipr_set_supported_devs;
6479 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6480 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006481 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6482
6483 LEAVE;
6484 return IPR_RC_JOB_RETURN;
6485}
6486
6487/**
6488 * ipr_build_mode_sense - Builds a mode sense command
6489 * @ipr_cmd: ipr command struct
6490 * @res_handle: resource handle to send command to
6491 * @parm: Byte 2 of mode sense command
6492 * @dma_addr: DMA address of mode sense buffer
6493 * @xfer_len: Size of DMA buffer
6494 *
6495 * Return value:
6496 * none
6497 **/
6498static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6499 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08006500 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006501{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006502 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6503
6504 ioarcb->res_handle = res_handle;
6505 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6506 ioarcb->cmd_pkt.cdb[2] = parm;
6507 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6508 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6509
Wayne Boyera32c0552010-02-19 13:23:36 -08006510 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006511}
6512
6513/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006514 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6515 * @ipr_cmd: ipr command struct
6516 *
6517 * This function handles the failure of an IOA bringup command.
6518 *
6519 * Return value:
6520 * IPR_RC_JOB_RETURN
6521 **/
6522static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6523{
6524 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6525 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6526
6527 dev_err(&ioa_cfg->pdev->dev,
6528 "0x%02X failed with IOASC: 0x%08X\n",
6529 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6530
6531 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6532 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6533 return IPR_RC_JOB_RETURN;
6534}
6535
6536/**
6537 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6538 * @ipr_cmd: ipr command struct
6539 *
6540 * This function handles the failure of a Mode Sense to the IOAFP.
6541 * Some adapters do not handle all mode pages.
6542 *
6543 * Return value:
6544 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6545 **/
6546static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6547{
Wayne Boyerf72919e2010-02-19 13:24:21 -08006548 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006549 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6550
6551 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08006552 ipr_cmd->job_step = ipr_set_supported_devs;
6553 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6554 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006555 return IPR_RC_JOB_CONTINUE;
6556 }
6557
6558 return ipr_reset_cmd_failed(ipr_cmd);
6559}
6560
6561/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006562 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6563 * @ipr_cmd: ipr command struct
6564 *
6565 * This function sends a Page 28 mode sense to the IOA to
6566 * retrieve SCSI bus attributes.
6567 *
6568 * Return value:
6569 * IPR_RC_JOB_RETURN
6570 **/
6571static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6572{
6573 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6574
6575 ENTER;
6576 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6577 0x28, ioa_cfg->vpd_cbs_dma +
6578 offsetof(struct ipr_misc_cbs, mode_pages),
6579 sizeof(struct ipr_mode_pages));
6580
6581 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006582 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006583
6584 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6585
6586 LEAVE;
6587 return IPR_RC_JOB_RETURN;
6588}
6589
6590/**
Brian Kingac09c342007-04-26 16:00:16 -05006591 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6592 * @ipr_cmd: ipr command struct
6593 *
6594 * This function enables dual IOA RAID support if possible.
6595 *
6596 * Return value:
6597 * IPR_RC_JOB_RETURN
6598 **/
6599static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6600{
6601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6602 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6603 struct ipr_mode_page24 *mode_page;
6604 int length;
6605
6606 ENTER;
6607 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6608 sizeof(struct ipr_mode_page24));
6609
6610 if (mode_page)
6611 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6612
6613 length = mode_pages->hdr.length + 1;
6614 mode_pages->hdr.length = 0;
6615
6616 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6617 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6618 length);
6619
6620 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6621 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6622
6623 LEAVE;
6624 return IPR_RC_JOB_RETURN;
6625}
6626
6627/**
6628 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6629 * @ipr_cmd: ipr command struct
6630 *
6631 * This function handles the failure of a Mode Sense to the IOAFP.
6632 * Some adapters do not handle all mode pages.
6633 *
6634 * Return value:
6635 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6636 **/
6637static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6638{
6639 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6640
6641 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6642 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6643 return IPR_RC_JOB_CONTINUE;
6644 }
6645
6646 return ipr_reset_cmd_failed(ipr_cmd);
6647}
6648
6649/**
6650 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6651 * @ipr_cmd: ipr command struct
6652 *
6653 * This function sends a mode sense to the IOA to retrieve
6654 * the IOA Advanced Function Control mode page.
6655 *
6656 * Return value:
6657 * IPR_RC_JOB_RETURN
6658 **/
6659static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6660{
6661 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6662
6663 ENTER;
6664 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6665 0x24, ioa_cfg->vpd_cbs_dma +
6666 offsetof(struct ipr_misc_cbs, mode_pages),
6667 sizeof(struct ipr_mode_pages));
6668
6669 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6670 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6671
6672 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6673
6674 LEAVE;
6675 return IPR_RC_JOB_RETURN;
6676}
6677
6678/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006679 * ipr_init_res_table - Initialize the resource table
6680 * @ipr_cmd: ipr command struct
6681 *
6682 * This function looks through the existing resource table, comparing
6683 * it with the config table. This function will take care of old/new
6684 * devices and schedule adding/removing them from the mid-layer
6685 * as appropriate.
6686 *
6687 * Return value:
6688 * IPR_RC_JOB_CONTINUE
6689 **/
6690static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6691{
6692 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6693 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006694 struct ipr_config_table_entry_wrapper cfgtew;
6695 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006696 LIST_HEAD(old_res);
6697
6698 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006699 if (ioa_cfg->sis64)
6700 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6701 else
6702 flag = ioa_cfg->u.cfg_table->hdr.flags;
6703
6704 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006705 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6706
6707 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6708 list_move_tail(&res->queue, &old_res);
6709
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006710 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07006711 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006712 else
6713 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6714
6715 for (i = 0; i < entries; i++) {
6716 if (ioa_cfg->sis64)
6717 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6718 else
6719 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006720 found = 0;
6721
6722 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006723 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006724 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6725 found = 1;
6726 break;
6727 }
6728 }
6729
6730 if (!found) {
6731 if (list_empty(&ioa_cfg->free_res_q)) {
6732 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6733 break;
6734 }
6735
6736 found = 1;
6737 res = list_entry(ioa_cfg->free_res_q.next,
6738 struct ipr_resource_entry, queue);
6739 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006740 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006741 res->add_to_ml = 1;
6742 }
6743
6744 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006745 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006746 }
6747
6748 list_for_each_entry_safe(res, temp, &old_res, queue) {
6749 if (res->sdev) {
6750 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006751 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006752 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006753 }
6754 }
6755
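	/*
	 * Anything still on old_res was not reported in the new config table
	 * and has no attached sdev, so clear its target data and return it
	 * to the free list.
	 */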
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006756 list_for_each_entry_safe(res, temp, &old_res, queue) {
6757 ipr_clear_res_target(res);
6758 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6759 }
6760
Brian Kingac09c342007-04-26 16:00:16 -05006761 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6762 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6763 else
6764 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006765
6766 LEAVE;
6767 return IPR_RC_JOB_CONTINUE;
6768}
6769
6770/**
6771 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6772 * @ipr_cmd: ipr command struct
6773 *
6774 * This function sends a Query IOA Configuration command
6775 * to the adapter to retrieve the IOA configuration table.
6776 *
6777 * Return value:
6778 * IPR_RC_JOB_RETURN
6779 **/
6780static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6781{
6782 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6783 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006784 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05006785 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006786
6787 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05006788 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6789 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006790 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6791 ucode_vpd->major_release, ucode_vpd->card_type,
6792 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6793 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6794 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6795
6796 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07006797 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006798 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6799 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006800
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006801 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08006802 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006803
6804 ipr_cmd->job_step = ipr_init_res_table;
6805
6806 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6807
6808 LEAVE;
6809 return IPR_RC_JOB_RETURN;
6810}
6811
6812/**
6813 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6814 * @ipr_cmd: ipr command struct
6815 *
6816 * This utility function sends an inquiry to the adapter.
6817 *
6818 * Return value:
6819 * none
6820 **/
6821static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08006822 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006823{
6824 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006825
6826 ENTER;
6827 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6828 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6829
6830 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6831 ioarcb->cmd_pkt.cdb[1] = flags;
6832 ioarcb->cmd_pkt.cdb[2] = page;
6833 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6834
Wayne Boyera32c0552010-02-19 13:23:36 -08006835 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006836
6837 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6838 LEAVE;
6839}
6840
6841/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06006842 * ipr_inquiry_page_supported - Is the given inquiry page supported
6843 * @page0: inquiry page 0 buffer
6844 * @page: page code.
6845 *
6846 * This function determines if the specified inquiry page is supported.
6847 *
6848 * Return value:
6849 * 1 if page is supported / 0 if not
6850 **/
6851static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6852{
6853 int i;
6854
6855 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6856 if (page0->page[i] == page)
6857 return 1;
6858
6859 return 0;
6860}
6861
6862/**
Brian Kingac09c342007-04-26 16:00:16 -05006863 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6864 * @ipr_cmd: ipr command struct
6865 *
6866 * This function sends a Page 0xD0 inquiry to the adapter
6867 * to retrieve adapter capabilities.
6868 *
6869 * Return value:
6870 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6871 **/
6872static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6873{
6874 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6875 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6876 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6877
6878 ENTER;
6879 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6880 memset(cap, 0, sizeof(*cap));
6881
6882 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6883 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6884 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6885 sizeof(struct ipr_inquiry_cap));
6886 return IPR_RC_JOB_RETURN;
6887 }
6888
6889 LEAVE;
6890 return IPR_RC_JOB_CONTINUE;
6891}
6892
6893/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006894 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6895 * @ipr_cmd: ipr command struct
6896 *
6897 * This function sends a Page 3 inquiry to the adapter
6898 * to retrieve software VPD information.
6899 *
6900 * Return value:
6901 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6902 **/
6903static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6904{
6905 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006906
6907 ENTER;
6908
Brian Kingac09c342007-04-26 16:00:16 -05006909 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006910
6911 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6912 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6913 sizeof(struct ipr_inquiry_page3));
6914
6915 LEAVE;
6916 return IPR_RC_JOB_RETURN;
6917}
6918
6919/**
6920 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6921 * @ipr_cmd: ipr command struct
6922 *
6923 * This function sends a Page 0 inquiry to the adapter
6924 * to retrieve supported inquiry pages.
6925 *
6926 * Return value:
6927 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6928 **/
6929static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6930{
6931 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006932 char type[5];
6933
6934 ENTER;
6935
6936 /* Grab the type out of the VPD and store it away */
6937 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6938 type[4] = '\0';
6939 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6940
brking@us.ibm.com62275042005-11-01 17:01:14 -06006941 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006942
brking@us.ibm.com62275042005-11-01 17:01:14 -06006943 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6944 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6945 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006946
6947 LEAVE;
6948 return IPR_RC_JOB_RETURN;
6949}
6950
6951/**
6952 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6953 * @ipr_cmd: ipr command struct
6954 *
6955 * This function sends a standard inquiry to the adapter.
6956 *
6957 * Return value:
6958 * IPR_RC_JOB_RETURN
6959 **/
6960static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6961{
6962 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6963
6964 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006965 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006966
6967 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6968 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6969 sizeof(struct ipr_ioa_vpd));
6970
6971 LEAVE;
6972 return IPR_RC_JOB_RETURN;
6973}
6974
6975/**
Wayne Boyer214777b2010-02-19 13:24:26 -08006976 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006977 * @ipr_cmd: ipr command struct
6978 *
6979 * This function sends an Identify Host Request Response Queue
6980 * command to establish the HRRQ with the adapter.
6981 *
6982 * Return value:
6983 * IPR_RC_JOB_RETURN
6984 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08006985static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006986{
6987 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6988 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6989
6990 ENTER;
6991 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6992
6993 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6994 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6995
6996 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
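	/*
	 * CDB bytes 2-5 carry the low 32 bits of the host RRQ DMA address.
	 * SIS-64 adapters set cdb[1] to request the 64-bit form and pass
	 * the high 32 bits in CDB bytes 10-13 below.
	 */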
Wayne Boyer214777b2010-02-19 13:24:26 -08006997 if (ioa_cfg->sis64)
6998 ioarcb->cmd_pkt.cdb[1] = 0x1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006999 ioarcb->cmd_pkt.cdb[2] =
Wayne Boyer214777b2010-02-19 13:24:26 -08007000 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007001 ioarcb->cmd_pkt.cdb[3] =
Wayne Boyer214777b2010-02-19 13:24:26 -08007002 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007003 ioarcb->cmd_pkt.cdb[4] =
Wayne Boyer214777b2010-02-19 13:24:26 -08007004 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007005 ioarcb->cmd_pkt.cdb[5] =
Wayne Boyer214777b2010-02-19 13:24:26 -08007006 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007007 ioarcb->cmd_pkt.cdb[7] =
7008 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7009 ioarcb->cmd_pkt.cdb[8] =
7010 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7011
Wayne Boyer214777b2010-02-19 13:24:26 -08007012 if (ioa_cfg->sis64) {
7013 ioarcb->cmd_pkt.cdb[10] =
7014 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7015 ioarcb->cmd_pkt.cdb[11] =
7016 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7017 ioarcb->cmd_pkt.cdb[12] =
7018 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7019 ioarcb->cmd_pkt.cdb[13] =
7020 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7021 }
7022
Linus Torvalds1da177e2005-04-16 15:20:36 -07007023 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7024
7025 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7026
7027 LEAVE;
7028 return IPR_RC_JOB_RETURN;
7029}
7030
7031/**
7032 * ipr_reset_timer_done - Adapter reset timer function
7033 * @ipr_cmd: ipr command struct
7034 *
7035 * Description: This function is used in adapter reset processing
7036 * for timing events. If the reset_cmd pointer in the IOA
7037 * config struct no longer points at this command, we are doing nested
7038 * resets and fail_all_ops will take care of freeing the
7039 * command block.
7040 *
7041 * Return value:
7042 * none
7043 **/
7044static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7045{
7046 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7047 unsigned long lock_flags = 0;
7048
7049 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7050
7051 if (ioa_cfg->reset_cmd == ipr_cmd) {
7052 list_del(&ipr_cmd->queue);
7053 ipr_cmd->done(ipr_cmd);
7054 }
7055
7056 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7057}
7058
7059/**
7060 * ipr_reset_start_timer - Start a timer for adapter reset job
7061 * @ipr_cmd: ipr command struct
7062 * @timeout: timeout value
7063 *
7064 * Description: This function is used in adapter reset processing
7065 * for timing events. If the reset_cmd pointer in the IOA
7066 * config struct no longer points at this command, we are doing nested
7067 * resets and fail_all_ops will take care of freeing the
7068 * command block.
7069 *
7070 * Return value:
7071 * none
7072 **/
7073static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7074 unsigned long timeout)
7075{
7076 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7077 ipr_cmd->done = ipr_reset_ioa_job;
7078
7079 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7080 ipr_cmd->timer.expires = jiffies + timeout;
7081 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7082 add_timer(&ipr_cmd->timer);
7083}
7084
7085/**
7086 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7087 * @ioa_cfg: ioa cfg struct
7088 *
7089 * Return value:
7090 * nothing
7091 **/
7092static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7093{
7094 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7095
7096 /* Initialize Host RRQ pointers */
7097 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7098 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7099 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
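	/*
	 * The toggle bit alternates on each wrap of the circular RRQ so new
	 * responses can be told apart from entries left over from the
	 * previous pass.
	 */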
7100 ioa_cfg->toggle_bit = 1;
7101
7102 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007103 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007104}
7105
7106/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007107 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7108 * @ipr_cmd: ipr command struct
7109 *
7110 * Return value:
7111 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7112 **/
7113static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7114{
7115 unsigned long stage, stage_time;
7116 u32 feedback;
7117 volatile u32 int_reg;
7118 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7119 u64 maskval = 0;
7120
7121 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7122 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7123 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7124
7125 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7126
7127 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07007128 if (stage_time == 0)
7129 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7130 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08007131 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7132 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7133 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7134
7135 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7136 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7137 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7138 stage_time = ioa_cfg->transop_timeout;
7139 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7140 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7141 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
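		/*
		 * Mask both the IPL stage change and transition-to-operational
		 * interrupts with a single 64-bit write: stage change in the
		 * upper word, trans-to-oper in the lower.
		 */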
7142 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7143 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7144 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7145 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7146 return IPR_RC_JOB_CONTINUE;
7147 }
7148
7149 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7150 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7151 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7152 ipr_cmd->done = ipr_reset_ioa_job;
7153 add_timer(&ipr_cmd->timer);
7154 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7155
7156 return IPR_RC_JOB_RETURN;
7157}
7158
7159/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007160 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7161 * @ipr_cmd: ipr command struct
7162 *
7163 * This function reinitializes some control blocks and
7164 * enables destructive diagnostics on the adapter.
7165 *
7166 * Return value:
7167 * IPR_RC_JOB_RETURN
7168 **/
7169static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7170{
7171 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7172 volatile u32 int_reg;
7173
7174 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08007175 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007176 ipr_init_ioa_mem(ioa_cfg);
7177
7178 ioa_cfg->allow_interrupts = 1;
7179 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7180
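	/*
	 * If the adapter has already transitioned to operational, unmask the
	 * error and HRRQ interrupts and continue the reset job immediately
	 * instead of arming the operational timeout below.
	 */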
7181 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7182 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08007183 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007184 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7185 return IPR_RC_JOB_CONTINUE;
7186 }
7187
7188 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08007189 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007190
Wayne Boyer214777b2010-02-19 13:24:26 -08007191 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7192 if (ioa_cfg->sis64)
7193 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
7194
Linus Torvalds1da177e2005-04-16 15:20:36 -07007195 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7196
7197 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7198
Wayne Boyer214777b2010-02-19 13:24:26 -08007199 if (ioa_cfg->sis64) {
7200 ipr_cmd->job_step = ipr_reset_next_stage;
7201 return IPR_RC_JOB_CONTINUE;
7202 }
7203
Linus Torvalds1da177e2005-04-16 15:20:36 -07007204 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
Brian King5469cb52007-03-29 12:42:40 -05007205 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007206 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7207 ipr_cmd->done = ipr_reset_ioa_job;
7208 add_timer(&ipr_cmd->timer);
7209 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7210
7211 LEAVE;
7212 return IPR_RC_JOB_RETURN;
7213}
7214
7215/**
7216 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7217 * @ipr_cmd: ipr command struct
7218 *
7219 * This function is invoked when an adapter dump has run out
7220 * of processing time.
7221 *
7222 * Return value:
7223 * IPR_RC_JOB_CONTINUE
7224 **/
7225static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7226{
7227 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7228
7229 if (ioa_cfg->sdt_state == GET_DUMP)
7230 ioa_cfg->sdt_state = ABORT_DUMP;
7231
7232 ipr_cmd->job_step = ipr_reset_alert;
7233
7234 return IPR_RC_JOB_CONTINUE;
7235}
7236
7237/**
7238 * ipr_unit_check_no_data - Log a unit check/no data error log
7239 * @ioa_cfg: ioa config struct
7240 *
7241 * Logs an error indicating the adapter unit checked, but for some
7242 * reason, we were unable to fetch the unit check buffer.
7243 *
7244 * Return value:
7245 * nothing
7246 **/
7247static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7248{
7249 ioa_cfg->errors_logged++;
7250 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7251}
7252
7253/**
7254 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7255 * @ioa_cfg: ioa config struct
7256 *
7257 * Fetches the unit check buffer from the adapter by clocking the data
7258 * through the mailbox register.
7259 *
7260 * Return value:
7261 * nothing
7262 **/
7263static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7264{
7265 unsigned long mailbox;
7266 struct ipr_hostrcb *hostrcb;
7267 struct ipr_uc_sdt sdt;
7268 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05007269 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007270
7271 mailbox = readl(ioa_cfg->ioa_mailbox);
7272
Wayne Boyerdcbad002010-02-19 13:24:14 -08007273 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007274 ipr_unit_check_no_data(ioa_cfg);
7275 return;
7276 }
7277
7278 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7279 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7280 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7281
Wayne Boyerdcbad002010-02-19 13:24:14 -08007282 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7283 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7284 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007285 ipr_unit_check_no_data(ioa_cfg);
7286 return;
7287 }
7288
7289 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08007290 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7291 length = be32_to_cpu(sdt.entry[0].end_token);
7292 else
7293 length = (be32_to_cpu(sdt.entry[0].end_token) -
7294 be32_to_cpu(sdt.entry[0].start_token)) &
7295 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007296
7297 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7298 struct ipr_hostrcb, queue);
7299 list_del(&hostrcb->queue);
7300 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7301
7302 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08007303 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07007304 (__be32 *)&hostrcb->hcam,
7305 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7306
Brian King65f56472007-04-26 16:00:12 -05007307 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007308 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08007309 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05007310 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7311 ioa_cfg->sdt_state == GET_DUMP)
7312 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7313 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07007314 ipr_unit_check_no_data(ioa_cfg);
7315
7316 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7317}
7318
7319/**
7320 * ipr_reset_restore_cfg_space - Restore PCI config space.
7321 * @ipr_cmd: ipr command struct
7322 *
7323 * Description: This function restores the saved PCI config space of
7324 * the adapter, fails all outstanding ops back to the callers, and
7325 * fetches the dump/unit check if applicable to this reset.
7326 *
7327 * Return value:
7328 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7329 **/
7330static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7331{
7332 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7333 int rc;
7334
7335 ENTER;
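	/*
	 * pci_restore_state() is a no-op unless state_saved is set; flag it
	 * explicitly so the config space captured at probe time is written
	 * back on every reset.
	 */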
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02007336 ioa_cfg->pdev->state_saved = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007337 rc = pci_restore_state(ioa_cfg->pdev);
7338
7339 if (rc != PCIBIOS_SUCCESSFUL) {
7340 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7341 return IPR_RC_JOB_CONTINUE;
7342 }
7343
7344 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7345 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7346 return IPR_RC_JOB_CONTINUE;
7347 }
7348
7349 ipr_fail_all_ops(ioa_cfg);
7350
7351 if (ioa_cfg->ioa_unit_checked) {
7352 ioa_cfg->ioa_unit_checked = 0;
7353 ipr_get_unit_check_buffer(ioa_cfg);
7354 ipr_cmd->job_step = ipr_reset_alert;
7355 ipr_reset_start_timer(ipr_cmd, 0);
7356 return IPR_RC_JOB_RETURN;
7357 }
7358
7359 if (ioa_cfg->in_ioa_bringdown) {
7360 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7361 } else {
7362 ipr_cmd->job_step = ipr_reset_enable_ioa;
7363
7364 if (GET_DUMP == ioa_cfg->sdt_state) {
7365 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7366 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7367 schedule_work(&ioa_cfg->work_q);
7368 return IPR_RC_JOB_RETURN;
7369 }
7370 }
7371
Wayne Boyer438b0332010-05-10 09:13:00 -07007372 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007373 return IPR_RC_JOB_CONTINUE;
7374}
7375
7376/**
Brian Kinge619e1a2007-01-23 11:25:37 -06007377 * ipr_reset_bist_done - BIST has completed on the adapter.
7378 * @ipr_cmd: ipr command struct
7379 *
7380 * Description: Unblock config space and resume the reset process.
7381 *
7382 * Return value:
7383 * IPR_RC_JOB_CONTINUE
7384 **/
7385static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7386{
7387 ENTER;
7388 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7389 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7390 LEAVE;
7391 return IPR_RC_JOB_CONTINUE;
7392}
7393
7394/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007395 * ipr_reset_start_bist - Run BIST on the adapter.
7396 * @ipr_cmd: ipr command struct
7397 *
7398 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7399 *
7400 * Return value:
7401 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7402 **/
7403static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7404{
7405 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7406 int rc;
7407
7408 ENTER;
7409	pci_block_user_cfg_access(ioa_cfg->pdev);
7410	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7411
7412	if (rc != PCIBIOS_SUCCESSFUL) {
7413		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7414		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7415		rc = IPR_RC_JOB_CONTINUE;
7416	} else {
7417		ipr_cmd->job_step = ipr_reset_bist_done;
7418		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7419 rc = IPR_RC_JOB_RETURN;
7420 }
7421
7422 LEAVE;
7423 return rc;
7424}
7425
7426/**
7427 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7428 * @ipr_cmd: ipr command struct
7429 *
7430 * Description: This clears PCI reset to the adapter and delays two seconds.
7431 *
7432 * Return value:
7433 * IPR_RC_JOB_RETURN
7434 **/
7435static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7436{
7437 ENTER;
7438 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7439 ipr_cmd->job_step = ipr_reset_bist_done;
7440 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7441 LEAVE;
7442 return IPR_RC_JOB_RETURN;
7443}
7444
7445/**
7446 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7447 * @ipr_cmd: ipr command struct
7448 *
7449 * Description: This asserts PCI reset to the adapter.
7450 *
7451 * Return value:
7452 * IPR_RC_JOB_RETURN
7453 **/
7454static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7455{
7456 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7457 struct pci_dev *pdev = ioa_cfg->pdev;
7458
7459 ENTER;
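	/* Assert a warm PCI reset to the slot; ipr_reset_slot_reset_done()
	 * deasserts it once IPR_PCI_RESET_TIMEOUT has expired.
	 */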
7460 pci_block_user_cfg_access(pdev);
7461 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7462 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7463 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7464 LEAVE;
7465 return IPR_RC_JOB_RETURN;
7466}
7467
7468/**
7469 * ipr_reset_allowed - Query whether or not IOA can be reset
7470 * @ioa_cfg: ioa config struct
7471 *
7472 * Return value:
7473 * 0 if reset not allowed / non-zero if reset is allowed
7474 **/
7475static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7476{
7477 volatile u32 temp_reg;
7478
7479 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7480 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7481}
7482
7483/**
7484 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7485 * @ipr_cmd: ipr command struct
7486 *
7487 * Description: This function waits for adapter permission to run BIST,
7488 * then runs BIST. If the adapter does not give permission after a
7489 * reasonable time, we will reset the adapter anyway. The impact of
7490 * resetting the adapter without warning the adapter is the risk of
7491 * losing the persistent error log on the adapter. If the adapter is
7492 * reset while it is writing to the flash on the adapter, the flash
7493 * segment will have bad ECC and be zeroed.
7494 *
7495 * Return value:
7496 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7497 **/
7498static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7499{
7500 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7501 int rc = IPR_RC_JOB_RETURN;
7502
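	/* Keep polling until the IOA drops its critical operation indicator
	 * or the wait budget runs out, then run the configured reset method.
	 */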
7503 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7504 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7505 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7506 } else {
7507		ipr_cmd->job_step = ioa_cfg->reset;
7508		rc = IPR_RC_JOB_CONTINUE;
7509 }
7510
7511 return rc;
7512}
7513
7514/**
7515 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
7516 * @ipr_cmd: ipr command struct
7517 *
7518 * Description: This function alerts the adapter that it will be reset.
7519 * If memory space is not currently enabled, proceed directly
7520 * to running BIST on the adapter. The timer must always be started
7521 * so we guarantee we do not run BIST from ipr_isr.
7522 *
7523 * Return value:
7524 * IPR_RC_JOB_RETURN
7525 **/
7526static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7527{
7528 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7529 u16 cmd_reg;
7530 int rc;
7531
7532 ENTER;
7533 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7534
7535 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7536 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7537		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7538		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7539	} else {
7540		ipr_cmd->job_step = ioa_cfg->reset;
7541	}
7542
7543 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7544 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7545
7546 LEAVE;
7547 return IPR_RC_JOB_RETURN;
7548}
7549
7550/**
7551 * ipr_reset_ucode_download_done - Microcode download completion
7552 * @ipr_cmd: ipr command struct
7553 *
7554 * Description: This function unmaps the microcode download buffer.
7555 *
7556 * Return value:
7557 * IPR_RC_JOB_CONTINUE
7558 **/
7559static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7560{
7561 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7562 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7563
7564 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7565 sglist->num_sg, DMA_TO_DEVICE);
7566
7567 ipr_cmd->job_step = ipr_reset_alert;
7568 return IPR_RC_JOB_CONTINUE;
7569}
7570
7571/**
7572 * ipr_reset_ucode_download - Download microcode to the adapter
7573 * @ipr_cmd: ipr command struct
7574 *
7575 * Description: This function checks to see if it there is microcode
7576 * to download to the adapter. If there is, a download is performed.
7577 *
7578 * Return value:
7579 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7580 **/
7581static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7582{
7583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7584 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7585
7586 ENTER;
7587 ipr_cmd->job_step = ipr_reset_alert;
7588
7589 if (!sglist)
7590 return IPR_RC_JOB_CONTINUE;
7591
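	/* Build a WRITE BUFFER (download microcode and save) command addressed
	 * to the IOA itself; the 24-bit image length is split across CDB bytes 6-8.
	 */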
7592 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7593 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7594 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7595 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7596 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7597 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7598 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7599
7600	if (ioa_cfg->sis64)
7601		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7602	else
7603		ipr_build_ucode_ioadl(ipr_cmd, sglist);
7604	ipr_cmd->job_step = ipr_reset_ucode_download_done;
7605
7606 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7607 IPR_WRITE_BUFFER_TIMEOUT);
7608
7609 LEAVE;
7610 return IPR_RC_JOB_RETURN;
7611}
7612
7613/**
7614 * ipr_reset_shutdown_ioa - Shutdown the adapter
7615 * @ipr_cmd: ipr command struct
7616 *
7617 * Description: This function issues an adapter shutdown of the
7618 * specified type to the specified adapter as part of the
7619 * adapter reset job.
7620 *
7621 * Return value:
7622 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7623 **/
7624static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7625{
7626 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7627 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7628 unsigned long timeout;
7629 int rc = IPR_RC_JOB_CONTINUE;
7630
7631 ENTER;
7632 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7633 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7634 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7635 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7636 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7637
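		/* Pick a timeout to match the shutdown flavor: a normal shutdown
		 * gets the full timeout, prepare-for-normal and abbreviated
		 * shutdowns get shorter ones.
		 */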
7638		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7639			timeout = IPR_SHUTDOWN_TIMEOUT;
7640		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7641			timeout = IPR_INTERNAL_TIMEOUT;
7642		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7643			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7644		else
7645			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7646
7647 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7648
7649 rc = IPR_RC_JOB_RETURN;
7650 ipr_cmd->job_step = ipr_reset_ucode_download;
7651 } else
7652 ipr_cmd->job_step = ipr_reset_alert;
7653
7654 LEAVE;
7655 return rc;
7656}
7657
7658/**
7659 * ipr_reset_ioa_job - Adapter reset job
7660 * @ipr_cmd: ipr command struct
7661 *
7662 * Description: This function is the job router for the adapter reset job.
7663 *
7664 * Return value:
7665 * none
7666 **/
7667static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7668{
7669 u32 rc, ioasc;
7670	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7671
7672 do {
7673 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
7674
7675 if (ioa_cfg->reset_cmd != ipr_cmd) {
7676 /*
7677 * We are doing nested adapter resets and this is
7678 * not the current reset job.
7679 */
7680 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7681 return;
7682 }
7683
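		/* A non-zero sense key means the previous job step failed; let its
		 * failure handler decide whether the reset job can continue.
		 */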
7684 if (IPR_IOASC_SENSE_KEY(ioasc)) {
7685			rc = ipr_cmd->job_step_failed(ipr_cmd);
7686			if (rc == IPR_RC_JOB_RETURN)
7687				return;
7688		}
7689
7690		ipr_reinit_ipr_cmnd(ipr_cmd);
7691		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7692		rc = ipr_cmd->job_step(ipr_cmd);
7693 } while(rc == IPR_RC_JOB_CONTINUE);
7694}
7695
7696/**
7697 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7698 * @ioa_cfg: ioa config struct
7699 * @job_step: first job step of reset job
7700 * @shutdown_type: shutdown type
7701 *
7702 * Description: This function will initiate the reset of the given adapter
7703 * starting at the selected job step.
7704 * If the caller needs to wait on the completion of the reset,
7705 * the caller must sleep on the reset_wait_q.
7706 *
7707 * Return value:
7708 * none
7709 **/
7710static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7711 int (*job_step) (struct ipr_cmnd *),
7712 enum ipr_shutdown_type shutdown_type)
7713{
7714 struct ipr_cmnd *ipr_cmd;
7715
7716 ioa_cfg->in_reset_reload = 1;
7717 ioa_cfg->allow_cmds = 0;
7718 scsi_block_requests(ioa_cfg->host);
7719
7720 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7721 ioa_cfg->reset_cmd = ipr_cmd;
7722 ipr_cmd->job_step = job_step;
7723 ipr_cmd->u.shutdown_type = shutdown_type;
7724
7725 ipr_reset_ioa_job(ipr_cmd);
7726}
7727
7728/**
7729 * ipr_initiate_ioa_reset - Initiate an adapter reset
7730 * @ioa_cfg: ioa config struct
7731 * @shutdown_type: shutdown type
7732 *
7733 * Description: This function will initiate the reset of the given adapter.
7734 * If the caller needs to wait on the completion of the reset,
7735 * the caller must sleep on the reset_wait_q.
7736 *
7737 * Return value:
7738 * none
7739 **/
7740static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7741 enum ipr_shutdown_type shutdown_type)
7742{
7743 if (ioa_cfg->ioa_is_dead)
7744 return;
7745
7746 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7747 ioa_cfg->sdt_state = ABORT_DUMP;
7748
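	/* After too many back-to-back reset attempts, give up and take the
	 * adapter offline instead of retrying forever.
	 */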
7749 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7750 dev_err(&ioa_cfg->pdev->dev,
7751 "IOA taken offline - error recovery failed\n");
7752
7753 ioa_cfg->reset_retries = 0;
7754 ioa_cfg->ioa_is_dead = 1;
7755
7756 if (ioa_cfg->in_ioa_bringdown) {
7757 ioa_cfg->reset_cmd = NULL;
7758 ioa_cfg->in_reset_reload = 0;
7759 ipr_fail_all_ops(ioa_cfg);
7760 wake_up_all(&ioa_cfg->reset_wait_q);
7761
7762 spin_unlock_irq(ioa_cfg->host->host_lock);
7763 scsi_unblock_requests(ioa_cfg->host);
7764 spin_lock_irq(ioa_cfg->host->host_lock);
7765 return;
7766 } else {
7767 ioa_cfg->in_ioa_bringdown = 1;
7768 shutdown_type = IPR_SHUTDOWN_NONE;
7769 }
7770 }
7771
7772 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7773 shutdown_type);
7774}
7775
7776/**
7777 * ipr_reset_freeze - Hold off all I/O activity
7778 * @ipr_cmd: ipr command struct
7779 *
7780 * Description: If the PCI slot is frozen, hold off all I/O
7781 * activity; then, as soon as the slot is available again,
7782 * initiate an adapter reset.
7783 */
7784static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7785{
7786 /* Disallow new interrupts, avoid loop */
7787 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7788 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7789 ipr_cmd->done = ipr_reset_ioa_job;
7790 return IPR_RC_JOB_RETURN;
7791}
7792
7793/**
7794 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7795 * @pdev: PCI device struct
7796 *
7797 * Description: This routine is called to tell us that the PCI bus
7798 * is down. Can't do anything here, except put the device driver
7799 * into a holding pattern, waiting for the PCI bus to come back.
7800 */
7801static void ipr_pci_frozen(struct pci_dev *pdev)
7802{
7803 unsigned long flags = 0;
7804 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7805
7806 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7807 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7809}
7810
7811/**
7812 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7813 * @pdev: PCI device struct
7814 *
7815 * Description: This routine is called by the pci error recovery
7816 * code after the PCI slot has been reset, just before we
7817 * should resume normal operations.
7818 */
7819static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7820{
7821 unsigned long flags = 0;
7822 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7823
7824 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7825	if (ioa_cfg->needs_warm_reset)
7826		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7827	else
7828		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7829					IPR_SHUTDOWN_NONE);
7830	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7831 return PCI_ERS_RESULT_RECOVERED;
7832}
7833
7834/**
7835 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7836 * @pdev: PCI device struct
7837 *
7838 * Description: This routine is called when the PCI bus has
7839 * permanently failed.
7840 */
7841static void ipr_pci_perm_failure(struct pci_dev *pdev)
7842{
7843 unsigned long flags = 0;
7844 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7845
7846 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7847 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7848 ioa_cfg->sdt_state = ABORT_DUMP;
7849 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7850 ioa_cfg->in_ioa_bringdown = 1;
7851	ioa_cfg->allow_cmds = 0;
7852	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7854}
7855
7856/**
7857 * ipr_pci_error_detected - Called when a PCI error is detected.
7858 * @pdev: PCI device struct
7859 * @state: PCI channel state
7860 *
7861 * Description: Called when a PCI error is detected.
7862 *
7863 * Return value:
7864 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7865 */
7866static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7867 pci_channel_state_t state)
7868{
7869 switch (state) {
7870 case pci_channel_io_frozen:
7871 ipr_pci_frozen(pdev);
7872 return PCI_ERS_RESULT_NEED_RESET;
7873 case pci_channel_io_perm_failure:
7874 ipr_pci_perm_failure(pdev);
7875 return PCI_ERS_RESULT_DISCONNECT;
7876 break;
7877 default:
7878 break;
7879 }
7880 return PCI_ERS_RESULT_NEED_RESET;
7881}
7882
7883/**
7884 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7885 * @ioa_cfg: ioa cfg struct
7886 *
7887 * Description: This is the second phase of adapter initialization
7888 * This function takes care of initializing the adapter to the point
7889 * where it can accept new commands.
7890 *
7891 * Return value:
7892 * 	0 on success / -EIO on failure
7893 **/
7894static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7895{
7896 int rc = 0;
7897 unsigned long host_lock_flags = 0;
7898
7899 ENTER;
7900 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7901 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7902	if (ioa_cfg->needs_hard_reset) {
7903 ioa_cfg->needs_hard_reset = 0;
7904 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7905 } else
7906 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7907 IPR_SHUTDOWN_NONE);
7908
7909 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7910 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7911 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7912
7913 if (ioa_cfg->ioa_is_dead) {
7914 rc = -EIO;
7915 } else if (ipr_invalid_adapter(ioa_cfg)) {
7916 if (!ipr_testmode)
7917 rc = -EIO;
7918
7919 dev_err(&ioa_cfg->pdev->dev,
7920 "Adapter not supported in this hardware configuration.\n");
7921 }
7922
7923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7924
7925 LEAVE;
7926 return rc;
7927}
7928
7929/**
7930 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7931 * @ioa_cfg: ioa config struct
7932 *
7933 * Return value:
7934 * none
7935 **/
7936static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7937{
7938 int i;
7939
7940 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7941 if (ioa_cfg->ipr_cmnd_list[i])
7942 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7943 ioa_cfg->ipr_cmnd_list[i],
7944 ioa_cfg->ipr_cmnd_list_dma[i]);
7945
7946 ioa_cfg->ipr_cmnd_list[i] = NULL;
7947 }
7948
7949 if (ioa_cfg->ipr_cmd_pool)
7950 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7951
7952 ioa_cfg->ipr_cmd_pool = NULL;
7953}
7954
7955/**
7956 * ipr_free_mem - Frees memory allocated for an adapter
7957 * @ioa_cfg: ioa cfg struct
7958 *
7959 * Return value:
7960 * nothing
7961 **/
7962static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7963{
7964 int i;
7965
7966 kfree(ioa_cfg->res_entries);
7967 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7968 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7969 ipr_free_cmd_blks(ioa_cfg);
7970 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7971 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7972	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7973			    ioa_cfg->u.cfg_table,
7974			    ioa_cfg->cfg_table_dma);
7975
7976 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7977 pci_free_consistent(ioa_cfg->pdev,
7978 sizeof(struct ipr_hostrcb),
7979 ioa_cfg->hostrcb[i],
7980 ioa_cfg->hostrcb_dma[i]);
7981 }
7982
7983 ipr_free_dump(ioa_cfg);
7984	kfree(ioa_cfg->trace);
7985}
7986
7987/**
7988 * ipr_free_all_resources - Free all allocated resources for an adapter.
7989 * @ipr_cmd: ipr command struct
7990 *
7991 * This function frees all allocated resources for the
7992 * specified adapter.
7993 *
7994 * Return value:
7995 * none
7996 **/
7997static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7998{
7999 struct pci_dev *pdev = ioa_cfg->pdev;
8000
8001 ENTER;
8002 free_irq(pdev->irq, ioa_cfg);
8003	pci_disable_msi(pdev);
8004	iounmap(ioa_cfg->hdw_dma_regs);
8005 pci_release_regions(pdev);
8006 ipr_free_mem(ioa_cfg);
8007 scsi_host_put(ioa_cfg->host);
8008 pci_disable_device(pdev);
8009 LEAVE;
8010}
8011
8012/**
8013 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8014 * @ioa_cfg: ioa config struct
8015 *
8016 * Return value:
8017 * 0 on success / -ENOMEM on allocation failure
8018 **/
8019static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8020{
8021 struct ipr_cmnd *ipr_cmd;
8022 struct ipr_ioarcb *ioarcb;
8023 dma_addr_t dma_addr;
8024 int i;
8025
8026 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
8027						 sizeof(struct ipr_cmnd), 16, 0);
8028
8029 if (!ioa_cfg->ipr_cmd_pool)
8030 return -ENOMEM;
8031
8032 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8033		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8034
8035 if (!ipr_cmd) {
8036 ipr_free_cmd_blks(ioa_cfg);
8037 return -ENOMEM;
8038 }
8039
8040 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8041 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8042 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8043
8044 ioarcb = &ipr_cmd->ioarcb;
8045		ipr_cmd->dma_addr = dma_addr;
8046 if (ioa_cfg->sis64)
8047 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8048 else
8049 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8050
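		/* Tell the adapter where, inside this command block, the IOADL
		 * lives and where to DMA the IOASA status back; SIS-64 chips
		 * use 64-bit addresses for both.
		 */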
8051		ioarcb->host_response_handle = cpu_to_be32(i << 2);
8052		if (ioa_cfg->sis64) {
8053 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8054 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8055 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8056 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8057 } else {
8058 ioarcb->write_ioadl_addr =
8059 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8060 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8061 ioarcb->ioasa_host_pci_addr =
8062 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8063 }
8064		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8065 ipr_cmd->cmd_index = i;
8066 ipr_cmd->ioa_cfg = ioa_cfg;
8067 ipr_cmd->sense_buffer_dma = dma_addr +
8068 offsetof(struct ipr_cmnd, sense_buffer);
8069
8070 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8071 }
8072
8073 return 0;
8074}
8075
8076/**
8077 * ipr_alloc_mem - Allocate memory for an adapter
8078 * @ioa_cfg: ioa config struct
8079 *
8080 * Return value:
8081 * 0 on success / non-zero for error
8082 **/
8083static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8084{
8085 struct pci_dev *pdev = ioa_cfg->pdev;
8086 int i, rc = -ENOMEM;
8087
8088 ENTER;
8089	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8090				       ioa_cfg->max_devs_supported, GFP_KERNEL);
8091
8092 if (!ioa_cfg->res_entries)
8093 goto out;
8094
8095	if (ioa_cfg->sis64) {
8096 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8097 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8098 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8099 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8100 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8101 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8102 }
8103
8104 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8105		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8106		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8107	}
8108
8109 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8110 sizeof(struct ipr_misc_cbs),
8111 &ioa_cfg->vpd_cbs_dma);
8112
8113 if (!ioa_cfg->vpd_cbs)
8114 goto out_free_res_entries;
8115
8116 if (ipr_alloc_cmd_blks(ioa_cfg))
8117 goto out_free_vpd_cbs;
8118
8119 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8120 sizeof(u32) * IPR_NUM_CMD_BLKS,
8121 &ioa_cfg->host_rrq_dma);
8122
8123 if (!ioa_cfg->host_rrq)
8124 goto out_ipr_free_cmd_blocks;
8125
8126	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8127						    ioa_cfg->cfg_table_size,
8128						    &ioa_cfg->cfg_table_dma);
8129
8130	if (!ioa_cfg->u.cfg_table)
8131		goto out_free_host_rrq;
8132
8133 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8134 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8135 sizeof(struct ipr_hostrcb),
8136 &ioa_cfg->hostrcb_dma[i]);
8137
8138 if (!ioa_cfg->hostrcb[i])
8139 goto out_free_hostrcb_dma;
8140
8141 ioa_cfg->hostrcb[i]->hostrcb_dma =
8142 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8143		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8144		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8145 }
8146
8147	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8148				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8149
8150 if (!ioa_cfg->trace)
8151 goto out_free_hostrcb_dma;
8152
8153	rc = 0;
8154out:
8155 LEAVE;
8156 return rc;
8157
8158out_free_hostrcb_dma:
8159 while (i-- > 0) {
8160 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8161 ioa_cfg->hostrcb[i],
8162 ioa_cfg->hostrcb_dma[i]);
8163 }
8164	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8165			    ioa_cfg->u.cfg_table,
8166			    ioa_cfg->cfg_table_dma);
8167out_free_host_rrq:
8168 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8169 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8170out_ipr_free_cmd_blocks:
8171 ipr_free_cmd_blks(ioa_cfg);
8172out_free_vpd_cbs:
8173 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8174 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8175out_free_res_entries:
8176 kfree(ioa_cfg->res_entries);
8177 goto out;
8178}
8179
8180/**
8181 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8182 * @ioa_cfg: ioa config struct
8183 *
8184 * Return value:
8185 * none
8186 **/
8187static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8188{
8189 int i;
8190
8191 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8192 ioa_cfg->bus_attr[i].bus = i;
8193 ioa_cfg->bus_attr[i].qas_enabled = 0;
8194 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8195 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8196 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8197 else
8198 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8199 }
8200}
8201
8202/**
8203 * ipr_init_ioa_cfg - Initialize IOA config struct
8204 * @ioa_cfg: ioa config struct
8205 * @host: scsi host struct
8206 * @pdev: PCI dev struct
8207 *
8208 * Return value:
8209 * none
8210 **/
8211static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8212 struct Scsi_Host *host, struct pci_dev *pdev)
8213{
8214 const struct ipr_interrupt_offsets *p;
8215 struct ipr_interrupts *t;
8216 void __iomem *base;
8217
8218 ioa_cfg->host = host;
8219 ioa_cfg->pdev = pdev;
8220 ioa_cfg->log_level = ipr_log_level;
8221	ioa_cfg->doorbell = IPR_DOORBELL;
8222	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8223 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8224 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8225 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8226 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8227 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8228 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8229 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8230
8231 INIT_LIST_HEAD(&ioa_cfg->free_q);
8232 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8233 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8234 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8235 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8236 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8237	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8238	init_waitqueue_head(&ioa_cfg->reset_wait_q);
8239	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8240	ioa_cfg->sdt_state = INACTIVE;
8241
8242	ipr_initialize_bus_attr(ioa_cfg);
8243	ioa_cfg->max_devs_supported = ipr_max_devs;
8244
8245	if (ioa_cfg->sis64) {
8246 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8247 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8248 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8249 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8250 } else {
8251 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8252 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8253 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8254 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8255 }
8256	host->max_channel = IPR_MAX_BUS_TO_SCAN;
8257 host->unique_id = host->host_no;
8258 host->max_cmd_len = IPR_MAX_CDB_LEN;
8259 pci_set_drvdata(pdev, ioa_cfg);
8260
8261 p = &ioa_cfg->chip_cfg->regs;
8262 t = &ioa_cfg->regs;
8263 base = ioa_cfg->hdw_dma_regs;
8264
8265 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8266 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8267	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8268	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8269	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8270	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8271	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8272	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8273	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8274	t->ioarrin_reg = base + p->ioarrin_reg;
8275	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8276	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8277	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8278	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8279	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8280	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8281
8282	if (ioa_cfg->sis64) {
8283		t->init_feedback_reg = base + p->init_feedback_reg;
8284		t->dump_addr_reg = base + p->dump_addr_reg;
8285		t->dump_data_reg = base + p->dump_data_reg;
8286	}
8287}
8288
8289/**
8290 * ipr_get_chip_info - Find adapter chip information
8291 * @dev_id: PCI device id struct
8292 *
8293 * Return value:
8294 * 	ptr to chip information on success / NULL on failure
8295 **/
8296static const struct ipr_chip_t * __devinit
8297ipr_get_chip_info(const struct pci_device_id *dev_id)
8298{
8299 int i;
8300
8301	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8302		if (ipr_chip[i].vendor == dev_id->vendor &&
8303		    ipr_chip[i].device == dev_id->device)
8304			return &ipr_chip[i];
8305	return NULL;
8306}
8307
8308/**
8309 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8310 * @pdev: PCI device struct
8311 *
8312 * Description: Simply set the msi_received flag to 1 indicating that
8313 * Message Signaled Interrupts are supported.
8314 *
8315 * Return value:
8316 * 0 on success / non-zero on failure
8317 **/
8318static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8319{
8320 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8321 unsigned long lock_flags = 0;
8322 irqreturn_t rc = IRQ_HANDLED;
8323
8324 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8325
8326 ioa_cfg->msi_received = 1;
8327 wake_up(&ioa_cfg->msi_wait_q);
8328
8329 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8330 return rc;
8331}
8332
8333/**
8334 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8335 * @pdev: PCI device struct
8336 *
8337 * Description: The return value from pci_enable_msi() can not always be
8338 * trusted. This routine sets up and initiates a test interrupt to determine
8339 * if the interrupt is received via the ipr_test_intr() service routine.
8340 * If the test fails, the driver will fall back to LSI.
8341 *
8342 * Return value:
8343 * 0 on success / non-zero on failure
8344 **/
8345static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8346 struct pci_dev *pdev)
8347{
8348 int rc;
8349 volatile u32 int_reg;
8350 unsigned long lock_flags = 0;
8351
8352 ENTER;
8353
8354 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8355 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8356 ioa_cfg->msi_received = 0;
8357 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8358	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8359	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8360 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8361
8362 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8363 if (rc) {
8364 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8365 return rc;
8366 } else if (ipr_debug)
8367 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8368
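	/* Fire a test interrupt by writing the IO debug acknowledge bit, then
	 * wait up to one second for ipr_test_intr() to set msi_received.
	 */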
8369	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8370	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8371 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8372 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8373
8374 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8375 if (!ioa_cfg->msi_received) {
8376 /* MSI test failed */
8377 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8378 rc = -EOPNOTSUPP;
8379 } else if (ipr_debug)
8380 dev_info(&pdev->dev, "MSI test succeeded.\n");
8381
8382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8383
8384 free_irq(pdev->irq, ioa_cfg);
8385
8386 LEAVE;
8387
8388 return rc;
8389}
8390
8391/**
8392 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8393 * @pdev: PCI device struct
8394 * @dev_id: PCI device id struct
8395 *
8396 * Return value:
8397 * 0 on success / non-zero on failure
8398 **/
8399static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8400 const struct pci_device_id *dev_id)
8401{
8402 struct ipr_ioa_cfg *ioa_cfg;
8403 struct Scsi_Host *host;
8404 unsigned long ipr_regs_pci;
8405 void __iomem *ipr_regs;
8406	int rc = PCIBIOS_SUCCESSFUL;
8407	volatile u32 mask, uproc, interrupts;
8408
8409 ENTER;
8410
8411 if ((rc = pci_enable_device(pdev))) {
8412 dev_err(&pdev->dev, "Cannot enable adapter\n");
8413 goto out;
8414 }
8415
8416 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8417
8418 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8419
8420 if (!host) {
8421 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8422 rc = -ENOMEM;
8423 goto out_disable;
8424 }
8425
8426 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8427 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8428	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8429		      sata_port_info.flags, &ipr_sata_ops);
8430
8431	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8432
8433	if (!ioa_cfg->ipr_chip) {
8434		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8435 dev_id->vendor, dev_id->device);
8436 goto out_scsi_host_put;
8437 }
8438
8439	/* set SIS 32 or SIS 64 */
8440	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8441	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8442
8443	if (ipr_transop_timeout)
8444 ioa_cfg->transop_timeout = ipr_transop_timeout;
8445 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8446 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8447 else
8448 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8449
8450	ioa_cfg->revid = pdev->revision;
8451
8452	ipr_regs_pci = pci_resource_start(pdev, 0);
8453
8454 rc = pci_request_regions(pdev, IPR_NAME);
8455 if (rc < 0) {
8456 dev_err(&pdev->dev,
8457 "Couldn't register memory range of registers\n");
8458 goto out_scsi_host_put;
8459 }
8460
8461	ipr_regs = pci_ioremap_bar(pdev, 0);
8462
8463 if (!ipr_regs) {
8464 dev_err(&pdev->dev,
8465 "Couldn't map memory range of registers\n");
8466 rc = -ENOMEM;
8467 goto out_release_regions;
8468 }
8469
8470 ioa_cfg->hdw_dma_regs = ipr_regs;
8471 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8472 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8473
8474 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8475
8476 pci_set_master(pdev);
8477
8478	if (ioa_cfg->sis64) {
8479 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8480 if (rc < 0) {
8481 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8482 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8483 }
8484
8485 } else
8486 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8487
8488	if (rc < 0) {
8489 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8490 goto cleanup_nomem;
8491 }
8492
8493 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8494 ioa_cfg->chip_cfg->cache_line_size);
8495
8496 if (rc != PCIBIOS_SUCCESSFUL) {
8497 dev_err(&pdev->dev, "Write of cache line size failed\n");
8498 rc = -EIO;
8499 goto cleanup_nomem;
8500 }
8501
8502	/* Enable MSI style interrupts if they are supported. */
8503	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8504		rc = ipr_test_msi(ioa_cfg, pdev);
8505 if (rc == -EOPNOTSUPP)
8506 pci_disable_msi(pdev);
8507 else if (rc)
8508 goto out_msi_disable;
8509 else
8510 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8511 } else if (ipr_debug)
8512 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8513
8514	/* Save away PCI config space for use following IOA reset */
8515 rc = pci_save_state(pdev);
8516
8517 if (rc != PCIBIOS_SUCCESSFUL) {
8518 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8519 rc = -EIO;
8520 goto cleanup_nomem;
8521 }
8522
8523 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8524 goto cleanup_nomem;
8525
8526 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8527 goto cleanup_nomem;
8528
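	/* Size the config table for the 64-bit or 32-bit SIS layout before
	 * allocating adapter memory.
	 */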
8529	if (ioa_cfg->sis64)
8530 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8531 + ((sizeof(struct ipr_config_table_entry64)
8532 * ioa_cfg->max_devs_supported)));
8533 else
8534 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8535 + ((sizeof(struct ipr_config_table_entry)
8536 * ioa_cfg->max_devs_supported)));
8537
8538	rc = ipr_alloc_mem(ioa_cfg);
8539 if (rc < 0) {
8540 dev_err(&pdev->dev,
8541 "Couldn't allocate enough memory for device driver!\n");
8542 goto cleanup_nomem;
8543 }
8544
8545	/*
8546 * If HRRQ updated interrupt is not masked, or reset alert is set,
8547 * the card is in an unknown state and needs a hard reset
8548 */
8549	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8550 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8551 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8552	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8553		ioa_cfg->needs_hard_reset = 1;
8554	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8555 ioa_cfg->needs_hard_reset = 1;
8556 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8557 ioa_cfg->ioa_unit_checked = 1;
8558
8559	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8560	rc = request_irq(pdev->irq, ipr_isr,
8561 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8562 IPR_NAME, ioa_cfg);
8563
8564 if (rc) {
8565 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8566 pdev->irq, rc);
8567 goto cleanup_nolog;
8568 }
8569
8570	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8571 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8572 ioa_cfg->needs_warm_reset = 1;
8573 ioa_cfg->reset = ipr_reset_slot_reset;
8574 } else
8575 ioa_cfg->reset = ipr_reset_start_bist;
8576
8577	spin_lock(&ipr_driver_lock);
8578 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8579 spin_unlock(&ipr_driver_lock);
8580
8581 LEAVE;
8582out:
8583 return rc;
8584
8585cleanup_nolog:
8586 ipr_free_mem(ioa_cfg);
8587cleanup_nomem:
8588 iounmap(ipr_regs);
8589out_msi_disable:
8590	pci_disable_msi(pdev);
8591out_release_regions:
8592 pci_release_regions(pdev);
8593out_scsi_host_put:
8594 scsi_host_put(host);
8595out_disable:
8596 pci_disable_device(pdev);
8597 goto out;
8598}
8599
8600/**
8601 * ipr_scan_vsets - Scans for VSET devices
8602 * @ioa_cfg: ioa config struct
8603 *
8604 * Description: Since the VSET resources do not follow SAM in that we can have
8605 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8606 *
8607 * Return value:
8608 * none
8609 **/
8610static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8611{
8612 int target, lun;
8613
8614 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8615 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8616 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8617}
8618
8619/**
8620 * ipr_initiate_ioa_bringdown - Bring down an adapter
8621 * @ioa_cfg: ioa config struct
8622 * @shutdown_type: shutdown type
8623 *
8624 * Description: This function will initiate bringing down the adapter.
8625 * This consists of issuing an IOA shutdown to the adapter
8626 * to flush the cache, and running BIST.
8627 * If the caller needs to wait on the completion of the reset,
8628 * the caller must sleep on the reset_wait_q.
8629 *
8630 * Return value:
8631 * none
8632 **/
8633static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8634 enum ipr_shutdown_type shutdown_type)
8635{
8636 ENTER;
8637 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8638 ioa_cfg->sdt_state = ABORT_DUMP;
8639 ioa_cfg->reset_retries = 0;
8640 ioa_cfg->in_ioa_bringdown = 1;
8641 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8642 LEAVE;
8643}
8644
8645/**
8646 * __ipr_remove - Remove a single adapter
8647 * @pdev: pci device struct
8648 *
8649 * Adapter hot plug remove entry point.
8650 *
8651 * Return value:
8652 * none
8653 **/
8654static void __ipr_remove(struct pci_dev *pdev)
8655{
8656 unsigned long host_lock_flags = 0;
8657 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8658 ENTER;
8659
8660 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8661	while(ioa_cfg->in_reset_reload) {
8662 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8663 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8664 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8665 }
8666
8667	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8668
8669 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8670 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8671	flush_scheduled_work();
8672	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8673
8674 spin_lock(&ipr_driver_lock);
8675 list_del(&ioa_cfg->queue);
8676 spin_unlock(&ipr_driver_lock);
8677
8678 if (ioa_cfg->sdt_state == ABORT_DUMP)
8679 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8681
8682 ipr_free_all_resources(ioa_cfg);
8683
8684 LEAVE;
8685}
8686
8687/**
8688 * ipr_remove - IOA hot plug remove entry point
8689 * @pdev: pci device struct
8690 *
8691 * Adapter hot plug remove entry point.
8692 *
8693 * Return value:
8694 * none
8695 **/
8696static void __devexit ipr_remove(struct pci_dev *pdev)
8697{
8698 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8699
8700 ENTER;
8701
8702	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8703			      &ipr_trace_attr);
8704	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8705			     &ipr_dump_attr);
8706 scsi_remove_host(ioa_cfg->host);
8707
8708 __ipr_remove(pdev);
8709
8710 LEAVE;
8711}
8712
8713/**
8714 * ipr_probe - Adapter hot plug add entry point
8715 *
8716 * Return value:
8717 * 0 on success / non-zero on failure
8718 **/
8719static int __devinit ipr_probe(struct pci_dev *pdev,
8720 const struct pci_device_id *dev_id)
8721{
8722 struct ipr_ioa_cfg *ioa_cfg;
8723 int rc;
8724
8725 rc = ipr_probe_ioa(pdev, dev_id);
8726
8727 if (rc)
8728 return rc;
8729
8730 ioa_cfg = pci_get_drvdata(pdev);
8731 rc = ipr_probe_ioa_part2(ioa_cfg);
8732
8733 if (rc) {
8734 __ipr_remove(pdev);
8735 return rc;
8736 }
8737
8738 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8739
8740 if (rc) {
8741 __ipr_remove(pdev);
8742 return rc;
8743 }
8744
8745	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8746				   &ipr_trace_attr);
8747
8748 if (rc) {
8749 scsi_remove_host(ioa_cfg->host);
8750 __ipr_remove(pdev);
8751 return rc;
8752 }
8753
8754	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8755				  &ipr_dump_attr);
8756
8757 if (rc) {
8758		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8759				      &ipr_trace_attr);
8760 scsi_remove_host(ioa_cfg->host);
8761 __ipr_remove(pdev);
8762 return rc;
8763 }
8764
8765 scsi_scan_host(ioa_cfg->host);
8766 ipr_scan_vsets(ioa_cfg);
8767 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8768 ioa_cfg->allow_ml_add_del = 1;
8769	ioa_cfg->host->max_channel = IPR_VSET_BUS;
8770	schedule_work(&ioa_cfg->work_q);
8771 return 0;
8772}
8773
8774/**
8775 * ipr_shutdown - Shutdown handler.
8776 * @pdev: pci device struct
8777 *
8778 * This function is invoked upon system shutdown/reboot. It will issue
8779 * an adapter shutdown to the adapter to flush the write cache.
8780 *
8781 * Return value:
8782 * none
8783 **/
8784static void ipr_shutdown(struct pci_dev *pdev)
8785{
8786	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8787	unsigned long lock_flags = 0;
8788
8789 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8790	while(ioa_cfg->in_reset_reload) {
8791 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8792 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8793 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8794 }
8795
8796	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8797 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8798 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8799}
8800
8801static struct pci_device_id ipr_pci_table[] __devinitdata = {
8802 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8803		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
8804	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8805		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
8806	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8807		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
8808	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8809		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
8810	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8811		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
8812	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8813		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
8814	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8815		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
8816	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8817		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8818		IPR_USE_LONG_TRANSOP_TIMEOUT },
8819	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8820		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8821	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8822		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8823		IPR_USE_LONG_TRANSOP_TIMEOUT },
8824	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8825		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8826		IPR_USE_LONG_TRANSOP_TIMEOUT },
8827	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8828		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8829	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8830		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8831		IPR_USE_LONG_TRANSOP_TIMEOUT},
8832	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8833		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8834		IPR_USE_LONG_TRANSOP_TIMEOUT },
8835	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8836		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8837		IPR_USE_LONG_TRANSOP_TIMEOUT },
8838	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8839		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8840	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8841		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8842		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
8843	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
8844		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
8845	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8846		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
8847	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8848		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8849		IPR_USE_LONG_TRANSOP_TIMEOUT },
8850	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8851		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8852		IPR_USE_LONG_TRANSOP_TIMEOUT },
8853	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8854 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
8855 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8856 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8857 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8858 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8859 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8860 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8861 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8862 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8863 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8864 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8865 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8866 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8867 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8868 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
8869	{ }
8870};
8871MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8872
8873static struct pci_error_handlers ipr_err_handler = {
8874 .error_detected = ipr_pci_error_detected,
8875 .slot_reset = ipr_pci_slot_reset,
8876};
8877
8878static struct pci_driver ipr_driver = {
8879 .name = IPR_NAME,
8880 .id_table = ipr_pci_table,
8881 .probe = ipr_probe,
8882	.remove = __devexit_p(ipr_remove),
8883	.shutdown = ipr_shutdown,
8884	.err_handler = &ipr_err_handler,
8885};
8886
8887/**
8888 * ipr_halt_done - Shutdown prepare completion
8889 *
8890 * Return value:
8891 * none
8892 **/
8893static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8894{
8895 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8896
8897 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8898}
8899
8900/**
8901 * ipr_halt - Issue shutdown prepare to all adapters
8902 *
8903 * Return value:
8904 * NOTIFY_OK on success / NOTIFY_DONE on failure
8905 **/
8906static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8907{
8908 struct ipr_cmnd *ipr_cmd;
8909 struct ipr_ioa_cfg *ioa_cfg;
8910 unsigned long flags = 0;
8911
8912 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8913 return NOTIFY_DONE;
8914
8915 spin_lock(&ipr_driver_lock);
8916
8917 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8918 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8919 if (!ioa_cfg->allow_cmds) {
8920 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8921 continue;
8922 }
8923
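		/* Send a prepare-for-shutdown IOA command to every adapter that
		 * is still accepting commands.
		 */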
8924 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8925 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8926 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8927 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8928 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8929
8930 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8931 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8932 }
8933 spin_unlock(&ipr_driver_lock);
8934
8935 return NOTIFY_OK;
8936}
8937
8938static struct notifier_block ipr_notifier = {
8939 ipr_halt, NULL, 0
8940};
8941
8942/**
8943 * ipr_init - Module entry point
8944 *
8945 * Return value:
8946 * 0 on success / negative value on failure
8947 **/
8948static int __init ipr_init(void)
8949{
8950 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8951 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8952
8953	register_reboot_notifier(&ipr_notifier);
8954	return pci_register_driver(&ipr_driver);
8955}
8956
8957/**
8958 * ipr_exit - Module unload
8959 *
8960 * Module unload entry point.
8961 *
8962 * Return value:
8963 * none
8964 **/
8965static void __exit ipr_exit(void)
8966{
8967	unregister_reboot_notifier(&ipr_notifier);
8968	pci_unregister_driver(&ipr_driver);
8969}
8970
8971module_init(ipr_init);
8972module_exit(ipr_exit);