/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

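/*
 * Illustrative usage note (editorial addition, not part of the original
 * source): the parameters declared above are supplied at module load time.
 * For example, loading the driver with a smaller MSI-X vector count and
 * debug logging enabled might look like:
 *
 *	modprobe ipr number_of_msix=4 debug=1
 *
 * The parameter names match the module_param_named() declarations above;
 * the values shown are arbitrary examples, not recommendations.
 */
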
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

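/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * ipr_trc_hook() above treats ioa_cfg->trace[] as a ring buffer.
 * atomic_add_return() hands out a monotonically increasing sequence number,
 * and masking with IPR_TRACE_INDEX_MASK wraps it into the array, which works
 * assuming the trace array size is a power of two:
 *
 *	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) &
 *		      IPR_TRACE_INDEX_MASK;
 *	trace_entry = &ioa_cfg->trace[trace_index];
 *
 * With a hypothetical 4096-entry trace array, sequence numbers 4095, 4096
 * and 4097 would land in slots 4095, 0 and 1 respectively.
 */
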
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

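/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * on SIS-64 adapters, ipr_send_command() above reuses the low-order bits of
 * the IOARCB DMA address written to the IOARRIN register to encode the
 * IOARCB size, which is possible because those bits are unused for a
 * sufficiently aligned IOARCB:
 *
 *	send_dma_addr |= 0x1;	// 256 byte IOARCB (default)
 *	send_dma_addr |= 0x4;	// 512 byte IOARCB (large scatter lists)
 *
 * Assuming a 16-byte ipr_ioadl64_desc, a command needing more than 128
 * bytes of descriptors (more than 8 entries) is therefore sent with both
 * bits set so the adapter fetches the larger IOARCB.
 */
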
978/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979 * ipr_do_req - Send driver initiated requests.
980 * @ipr_cmd: ipr command struct
981 * @done: done function
982 * @timeout_func: timeout function
983 * @timeout: timeout value
984 *
985 * This function sends the specified command to the adapter with the
986 * timeout given. The done function is invoked on command completion.
987 *
988 * Return value:
989 * none
990 **/
991static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
992 void (*done) (struct ipr_cmnd *),
993 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
994{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600995 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996
997 ipr_cmd->done = done;
998
999 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
1000 ipr_cmd->timer.expires = jiffies + timeout;
1001 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
1002
1003 add_timer(&ipr_cmd->timer);
1004
1005 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
1006
Wayne Boyera32c0552010-02-19 13:23:36 -08001007 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008}
1009
1010/**
1011 * ipr_internal_cmd_done - Op done function for an internally generated op.
1012 * @ipr_cmd: ipr command struct
1013 *
1014 * This function is the op done function for an internally generated,
1015 * blocking op. It simply wakes the sleeping thread.
1016 *
1017 * Return value:
1018 * none
1019 **/
1020static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1021{
1022 if (ipr_cmd->sibling)
1023 ipr_cmd->sibling = NULL;
1024 else
1025 complete(&ipr_cmd->completion);
1026}
1027
1028/**
Wayne Boyera32c0552010-02-19 13:23:36 -08001029 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030 * @ipr_cmd: ipr command struct
1031 * @dma_addr: dma address
1032 * @len: transfer length
1033 * @flags: ioadl flag value
1034 *
1035 * This function initializes an ioadl in the case where there is only a single
1036 * descriptor.
1037 *
1038 * Return value:
1039 * nothing
1040 **/
1041static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1042 u32 len, int flags)
1043{
1044 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1045 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1046
1047 ipr_cmd->dma_use_sg = 1;
1048
1049 if (ipr_cmd->ioa_cfg->sis64) {
1050 ioadl64->flags = cpu_to_be32(flags);
1051 ioadl64->data_len = cpu_to_be32(len);
1052 ioadl64->address = cpu_to_be64(dma_addr);
1053
1054 ipr_cmd->ioarcb.ioadl_len =
1055 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1056 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1057 } else {
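		/*
		 * 32-bit SIS keeps separate IOARCB descriptor/length fields for
		 * read transfers; non-read transfers use the generic fields.
		 */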
1058 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1059 ioadl->address = cpu_to_be32(dma_addr);
1060
1061 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1062 ipr_cmd->ioarcb.read_ioadl_len =
1063 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1064 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1065 } else {
1066 ipr_cmd->ioarcb.ioadl_len =
1067 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1068 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1069 }
1070 }
1071}
1072
1073/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1075 * @ipr_cmd: ipr command struct
1076 * @timeout_func: function to invoke if command times out
1077 * @timeout: timeout
1078 *
1079 * Return value:
1080 * none
1081 **/
1082static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1083 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1084 u32 timeout)
1085{
1086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1087
1088 init_completion(&ipr_cmd->completion);
1089 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1090
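	/*
	 * Drop the host lock while sleeping; ipr_internal_cmd_done() will
	 * complete us once the adapter finishes the command.
	 */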
1091 spin_unlock_irq(ioa_cfg->host->host_lock);
1092 wait_for_completion(&ipr_cmd->completion);
1093 spin_lock_irq(ioa_cfg->host->host_lock);
1094}
1095
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001096static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1097{
Brian King3f1c0582015-07-14 11:41:33 -05001098 unsigned int hrrq;
1099
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001100 if (ioa_cfg->hrrq_num == 1)
Brian King3f1c0582015-07-14 11:41:33 -05001101 hrrq = 0;
1102 else {
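		/*
		 * Distribute I/O round-robin across HRRQs 1..hrrq_num-1;
		 * index 0 (IPR_INIT_HRRQ) is skipped here and left for
		 * driver-initiated commands.
		 */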
1103 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1104 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1105 }
1106 return hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001107}
1108
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109/**
1110 * ipr_send_hcam - Send an HCAM to the adapter.
1111 * @ioa_cfg: ioa config struct
1112 * @type: HCAM type
1113 * @hostrcb: hostrcb struct
1114 *
1115 * This function will send a Host Controlled Async command to the adapter.
1116 * If HCAMs are currently not allowed to be issued to the adapter, it will
1117 * place the hostrcb on the free queue.
1118 *
1119 * Return value:
1120 * none
1121 **/
1122static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1123 struct ipr_hostrcb *hostrcb)
1124{
1125 struct ipr_cmnd *ipr_cmd;
1126 struct ipr_ioarcb *ioarcb;
1127
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001128 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001130 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1132
1133 ipr_cmd->u.hostrcb = hostrcb;
1134 ioarcb = &ipr_cmd->ioarcb;
1135
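		/*
		 * Build the HCAM request: opcode and HCAM type go in the CDB,
		 * with the hostrcb buffer length in CDB bytes 7-8 (big endian).
		 */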
1136 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1137 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1138 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1139 ioarcb->cmd_pkt.cdb[1] = type;
1140 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1141 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1142
Wayne Boyera32c0552010-02-19 13:23:36 -08001143 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1144 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145
1146 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1147 ipr_cmd->done = ipr_process_ccn;
1148 else
1149 ipr_cmd->done = ipr_process_error;
1150
1151 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1152
Wayne Boyera32c0552010-02-19 13:23:36 -08001153 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 } else {
1155 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1156 }
1157}
1158
1159/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001160 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001162 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 *
1164 * Return value:
1165 * none
1166 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001167static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168{
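	/* Map the bus protocol reported by the adapter onto a libata device class. */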
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001169 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001170 case IPR_PROTO_SATA:
1171 case IPR_PROTO_SAS_STP:
1172 res->ata_class = ATA_DEV_ATA;
1173 break;
1174 case IPR_PROTO_SATA_ATAPI:
1175 case IPR_PROTO_SAS_STP_ATAPI:
1176 res->ata_class = ATA_DEV_ATAPI;
1177 break;
1178 default:
1179 res->ata_class = ATA_DEV_UNKNOWN;
1180 break;
1181	}
1182}
1183
1184/**
1185 * ipr_init_res_entry - Initialize a resource entry struct.
1186 * @res: resource entry struct
1187 * @cfgtew: config table entry wrapper struct
1188 *
1189 * Return value:
1190 * none
1191 **/
1192static void ipr_init_res_entry(struct ipr_resource_entry *res,
1193 struct ipr_config_table_entry_wrapper *cfgtew)
1194{
1195 int found = 0;
1196 unsigned int proto;
1197 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1198 struct ipr_resource_entry *gscsi_res = NULL;
1199
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001200 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 res->in_erp = 0;
1202 res->add_to_ml = 0;
1203 res->del_from_ml = 0;
1204 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06001205 res->reset_occurred = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001207 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001208
1209 if (ioa_cfg->sis64) {
1210 proto = cfgtew->u.cfgte64->proto;
Brian King359d96e2015-06-11 20:45:20 -05001211 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1212 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001213 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001214 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001215
1216 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1217 sizeof(res->res_path));
1218
1219 res->bus = 0;
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001220 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1221 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001222 res->lun = scsilun_to_int(&res->dev_lun);
1223
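		/*
		 * sis64 adapters identify devices by resource path and dev_id
		 * rather than bus/target/lun, so synthesize target numbers from
		 * the per-type ID bitmaps; GSCSI devices that share a dev_id
		 * reuse the same target.
		 */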
1224 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1225 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1226 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1227 found = 1;
1228 res->target = gscsi_res->target;
1229 break;
1230 }
1231 }
1232 if (!found) {
1233 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1234 ioa_cfg->max_devs_supported);
1235 set_bit(res->target, ioa_cfg->target_ids);
1236 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001237 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1238 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1239 res->target = 0;
1240 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1241 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1242 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1243 ioa_cfg->max_devs_supported);
1244 set_bit(res->target, ioa_cfg->array_ids);
1245 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1246 res->bus = IPR_VSET_VIRTUAL_BUS;
1247 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1248 ioa_cfg->max_devs_supported);
1249 set_bit(res->target, ioa_cfg->vset_ids);
1250 } else {
1251 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1252 ioa_cfg->max_devs_supported);
1253 set_bit(res->target, ioa_cfg->target_ids);
1254 }
1255 } else {
1256 proto = cfgtew->u.cfgte->proto;
1257 res->qmodel = IPR_QUEUEING_MODEL(res);
1258 res->flags = cfgtew->u.cfgte->flags;
1259 if (res->flags & IPR_IS_IOA_RESOURCE)
1260 res->type = IPR_RES_TYPE_IOAFP;
1261 else
1262 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1263
1264 res->bus = cfgtew->u.cfgte->res_addr.bus;
1265 res->target = cfgtew->u.cfgte->res_addr.target;
1266 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001267 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001268 }
1269
1270 ipr_update_ata_class(res, proto);
1271}
1272
1273/**
1274 * ipr_is_same_device - Determine if two devices are the same.
1275 * @res: resource entry struct
1276 * @cfgtew: config table entry wrapper struct
1277 *
1278 * Return value:
1279 * 1 if the devices are the same / 0 otherwise
1280 **/
1281static int ipr_is_same_device(struct ipr_resource_entry *res,
1282 struct ipr_config_table_entry_wrapper *cfgtew)
1283{
1284 if (res->ioa_cfg->sis64) {
1285 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1286 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001287 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001288 sizeof(cfgtew->u.cfgte64->lun))) {
1289 return 1;
1290 }
1291 } else {
1292 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1293 res->target == cfgtew->u.cfgte->res_addr.target &&
1294 res->lun == cfgtew->u.cfgte->res_addr.lun)
1295 return 1;
1296 }
1297
1298 return 0;
1299}
1300
1301/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001302 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001303 * @res_path: resource path
1304 * @buf: buffer
Brian Kingb3b3b402013-01-11 17:43:49 -06001305 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001306 *
1307 * Return value:
1308 * pointer to buffer
1309 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001310static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001311{
1312 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001313 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001314
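	/*
	 * Emit one hex byte per path element, dash separated, stopping at the
	 * 0xff terminator, e.g. "00-0A-03" (illustrative value).
	 */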
Wayne Boyer46d74562010-08-11 07:15:17 -07001315 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001316 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1317 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001319
1320 return buffer;
1321}
1322
1323/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001324 * ipr_format_res_path - Format the resource path for printing.
1325 * @ioa_cfg: ioa config struct
1326 * @res_path: resource path
1327 * @buf: buffer
1328 * @len: length of buffer provided
1329 *
1330 * Return value:
1331 * pointer to buffer
1332 **/
1333static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334 u8 *res_path, char *buffer, int len)
1335{
1336 char *p = buffer;
1337
1338 *p = '\0';
1339 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340	__ipr_format_res_path(res_path, p, len - (p - buffer));
1341 return buffer;
1342}
1343
1344/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001345 * ipr_update_res_entry - Update the resource entry.
1346 * @res: resource entry struct
1347 * @cfgtew: config table entry wrapper struct
1348 *
1349 * Return value:
1350 * none
1351 **/
1352static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353 struct ipr_config_table_entry_wrapper *cfgtew)
1354{
1355 char buffer[IPR_MAX_RES_PATH_LENGTH];
1356 unsigned int proto;
1357 int new_path = 0;
1358
1359 if (res->ioa_cfg->sis64) {
Brian King359d96e2015-06-11 20:45:20 -05001360 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer75576bb2010-07-14 10:50:14 -07001362 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001363
1364 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365 sizeof(struct ipr_std_inq_data));
1366
1367 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368 proto = cfgtew->u.cfgte64->proto;
1369 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373 sizeof(res->dev_lun.scsi_lun));
1374
1375 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376 sizeof(res->res_path))) {
1377 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378 sizeof(res->res_path));
1379 new_path = 1;
1380 }
1381
1382 if (res->sdev && new_path)
1383 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001384 ipr_format_res_path(res->ioa_cfg,
1385 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001386 } else {
1387 res->flags = cfgtew->u.cfgte->flags;
1388 if (res->flags & IPR_IS_IOA_RESOURCE)
1389 res->type = IPR_RES_TYPE_IOAFP;
1390 else
1391 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394 sizeof(struct ipr_std_inq_data));
1395
1396 res->qmodel = IPR_QUEUEING_MODEL(res);
1397 proto = cfgtew->u.cfgte->proto;
1398 res->res_handle = cfgtew->u.cfgte->res_handle;
1399 }
1400
1401 ipr_update_ata_class(res, proto);
1402}
1403
1404/**
1405 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406 * for the resource.
1407 * @res: resource entry struct
1409 *
1410 * Return value:
1411 * none
1412 **/
1413static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414{
1415 struct ipr_resource_entry *gscsi_res = NULL;
1416 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418 if (!ioa_cfg->sis64)
1419 return;
1420
1421 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422 clear_bit(res->target, ioa_cfg->array_ids);
1423 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424 clear_bit(res->target, ioa_cfg->vset_ids);
1425 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428 return;
1429 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431 } else if (res->bus == 0)
1432 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433}
1434
1435/**
1436 * ipr_handle_config_change - Handle a config change from the adapter
1437 * @ioa_cfg: ioa config struct
1438 * @hostrcb: hostrcb
1439 *
1440 * Return value:
1441 * none
1442 **/
1443static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001444 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445{
1446 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001447 struct ipr_config_table_entry_wrapper cfgtew;
1448 __be32 cc_res_handle;
1449
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 u32 is_ndn = 1;
1451
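	/*
	 * Look the changed resource up by its handle; is_ndn stays set when
	 * no existing entry matches, i.e. this is a newly reported device.
	 */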
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001452 if (ioa_cfg->sis64) {
1453 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455 } else {
1456 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459
1460 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001461 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 is_ndn = 0;
1463 break;
1464 }
1465 }
1466
1467 if (is_ndn) {
1468 if (list_empty(&ioa_cfg->free_res_q)) {
1469 ipr_send_hcam(ioa_cfg,
1470 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471 hostrcb);
1472 return;
1473 }
1474
1475 res = list_entry(ioa_cfg->free_res_q.next,
1476 struct ipr_resource_entry, queue);
1477
1478 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001479 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481 }
1482
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001483 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
1485 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001488 res->res_handle = IPR_INVALID_RES_HANDLE;
Brian Kingf688f962014-12-02 12:47:37 -06001489 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001490 } else {
1491 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001493 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001494 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 res->add_to_ml = 1;
Brian Kingf688f962014-12-02 12:47:37 -06001496 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 }
1498
1499 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500}
1501
1502/**
1503 * ipr_process_ccn - Op done function for a CCN.
1504 * @ipr_cmd: ipr command struct
1505 *
1506 * This function is the op done function for a configuration
1507 * change notification host controlled async from the adapter.
1508 *
1509 * Return value:
1510 * none
1511 **/
1512static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513{
1514 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001516 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
Brian Kingafc3f832016-08-24 12:56:51 -05001518 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001519 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520
1521 if (ioasc) {
Brian King4fdd7c72015-03-26 11:23:50 -05001522 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 dev_err(&ioa_cfg->pdev->dev,
1525 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528 } else {
1529 ipr_handle_config_change(ioa_cfg, hostrcb);
1530 }
1531}
1532
1533/**
Brian King8cf093e2007-04-26 16:00:14 -05001534 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535 * @i: index into buffer
1536 * @buf: string to modify
1537 *
1538 * This function will strip all trailing whitespace, pad the end
1539 * of the string with a single space, and NULL terminate the string.
1540 *
1541 * Return value:
1542 * new length of string
1543 **/
1544static int strip_and_pad_whitespace(int i, char *buf)
1545{
1546 while (i && buf[i] == ' ')
1547 i--;
1548 buf[i+1] = ' ';
1549 buf[i+2] = '\0';
1550 return i + 2;
1551}
1552
1553/**
1554 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1555 * @prefix: string to print at start of printk
1556 * @hostrcb: hostrcb pointer
1557 * @vpd: vendor/product id/sn struct
1558 *
1559 * Return value:
1560 * none
1561 **/
1562static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563 struct ipr_vpd *vpd)
1564{
1565 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1566 int i = 0;
1567
1568 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1570
1571 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1573
1574 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1575 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1576
1577 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1578}
1579
1580/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001582 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 *
1584 * Return value:
1585 * none
1586 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001587static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588{
1589 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1590 + IPR_SERIAL_NUM_LEN];
1591
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001592 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1593 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 IPR_PROD_ID_LEN);
1595 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1596 ipr_err("Vendor/Product ID: %s\n", buffer);
1597
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001598 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1600 ipr_err(" Serial Number: %s\n", buffer);
1601}
1602
1603/**
Brian King8cf093e2007-04-26 16:00:14 -05001604 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605 * @prefix: string to print at start of printk
1606 * @hostrcb: hostrcb pointer
1607 * @vpd: vendor/product id/sn/wwn struct
1608 *
1609 * Return value:
1610 * none
1611 **/
1612static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1613 struct ipr_ext_vpd *vpd)
1614{
1615 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1616 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1617 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1618}
1619
1620/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001621 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622 * @vpd: vendor/product id/sn/wwn struct
1623 *
1624 * Return value:
1625 * none
1626 **/
1627static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1628{
1629 ipr_log_vpd(&vpd->vpd);
1630 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1631 be32_to_cpu(vpd->wwid[1]));
1632}
1633
1634/**
1635 * ipr_log_enhanced_cache_error - Log a cache error.
1636 * @ioa_cfg: ioa config struct
1637 * @hostrcb: hostrcb struct
1638 *
1639 * Return value:
1640 * none
1641 **/
1642static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1643 struct ipr_hostrcb *hostrcb)
1644{
Wayne Boyer4565e372010-02-19 13:24:07 -08001645 struct ipr_hostrcb_type_12_error *error;
1646
1647 if (ioa_cfg->sis64)
1648 error = &hostrcb->hcam.u.error64.u.type_12_error;
1649 else
1650 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001651
1652 ipr_err("-----Current Configuration-----\n");
1653 ipr_err("Cache Directory Card Information:\n");
1654 ipr_log_ext_vpd(&error->ioa_vpd);
1655 ipr_err("Adapter Card Information:\n");
1656 ipr_log_ext_vpd(&error->cfc_vpd);
1657
1658 ipr_err("-----Expected Configuration-----\n");
1659 ipr_err("Cache Directory Card Information:\n");
1660 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1661 ipr_err("Adapter Card Information:\n");
1662 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1663
1664 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665 be32_to_cpu(error->ioa_data[0]),
1666 be32_to_cpu(error->ioa_data[1]),
1667 be32_to_cpu(error->ioa_data[2]));
1668}
1669
1670/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 * ipr_log_cache_error - Log a cache error.
1672 * @ioa_cfg: ioa config struct
1673 * @hostrcb: hostrcb struct
1674 *
1675 * Return value:
1676 * none
1677 **/
1678static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1679 struct ipr_hostrcb *hostrcb)
1680{
1681 struct ipr_hostrcb_type_02_error *error =
1682 &hostrcb->hcam.u.error.u.type_02_error;
1683
1684 ipr_err("-----Current Configuration-----\n");
1685 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001686 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001688 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
1690 ipr_err("-----Expected Configuration-----\n");
1691 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001692 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001694 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
1696 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697 be32_to_cpu(error->ioa_data[0]),
1698 be32_to_cpu(error->ioa_data[1]),
1699 be32_to_cpu(error->ioa_data[2]));
1700}
1701
1702/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001703 * ipr_log_enhanced_config_error - Log a configuration error.
1704 * @ioa_cfg: ioa config struct
1705 * @hostrcb: hostrcb struct
1706 *
1707 * Return value:
1708 * none
1709 **/
1710static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711 struct ipr_hostrcb *hostrcb)
1712{
1713 int errors_logged, i;
1714 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1715 struct ipr_hostrcb_type_13_error *error;
1716
1717 error = &hostrcb->hcam.u.error.u.type_13_error;
1718 errors_logged = be32_to_cpu(error->errors_logged);
1719
1720 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721 be32_to_cpu(error->errors_detected), errors_logged);
1722
1723 dev_entry = error->dev;
1724
1725 for (i = 0; i < errors_logged; i++, dev_entry++) {
1726 ipr_err_separator;
1727
1728 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1729 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731 ipr_err("-----New Device Information-----\n");
1732 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734 ipr_err("Cache Directory Card Information:\n");
1735 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737 ipr_err("Adapter Card Information:\n");
1738 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739 }
1740}
1741
1742/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001743 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1744 * @ioa_cfg: ioa config struct
1745 * @hostrcb: hostrcb struct
1746 *
1747 * Return value:
1748 * none
1749 **/
1750static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751 struct ipr_hostrcb *hostrcb)
1752{
1753 int errors_logged, i;
1754 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1755 struct ipr_hostrcb_type_23_error *error;
1756 char buffer[IPR_MAX_RES_PATH_LENGTH];
1757
1758 error = &hostrcb->hcam.u.error64.u.type_23_error;
1759 errors_logged = be32_to_cpu(error->errors_logged);
1760
1761 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762 be32_to_cpu(error->errors_detected), errors_logged);
1763
1764 dev_entry = error->dev;
1765
1766 for (i = 0; i < errors_logged; i++, dev_entry++) {
1767 ipr_err_separator;
1768
1769 ipr_err("Device %d : %s", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001770 __ipr_format_res_path(dev_entry->res_path,
1771 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001772 ipr_log_ext_vpd(&dev_entry->vpd);
1773
1774 ipr_err("-----New Device Information-----\n");
1775 ipr_log_ext_vpd(&dev_entry->new_vpd);
1776
1777 ipr_err("Cache Directory Card Information:\n");
1778 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1779
1780 ipr_err("Adapter Card Information:\n");
1781 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1782 }
1783}
1784
1785/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 * ipr_log_config_error - Log a configuration error.
1787 * @ioa_cfg: ioa config struct
1788 * @hostrcb: hostrcb struct
1789 *
1790 * Return value:
1791 * none
1792 **/
1793static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1794 struct ipr_hostrcb *hostrcb)
1795{
1796 int errors_logged, i;
1797 struct ipr_hostrcb_device_data_entry *dev_entry;
1798 struct ipr_hostrcb_type_03_error *error;
1799
1800 error = &hostrcb->hcam.u.error.u.type_03_error;
1801 errors_logged = be32_to_cpu(error->errors_logged);
1802
1803 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1804 be32_to_cpu(error->errors_detected), errors_logged);
1805
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001806 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
1808 for (i = 0; i < errors_logged; i++, dev_entry++) {
1809 ipr_err_separator;
1810
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001811 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001812 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
1814 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001815 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
1817 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001818 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819
1820 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001821 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
1823 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1824 be32_to_cpu(dev_entry->ioa_data[0]),
1825 be32_to_cpu(dev_entry->ioa_data[1]),
1826 be32_to_cpu(dev_entry->ioa_data[2]),
1827 be32_to_cpu(dev_entry->ioa_data[3]),
1828 be32_to_cpu(dev_entry->ioa_data[4]));
1829 }
1830}
1831
1832/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001833 * ipr_log_enhanced_array_error - Log an array configuration error.
1834 * @ioa_cfg: ioa config struct
1835 * @hostrcb: hostrcb struct
1836 *
1837 * Return value:
1838 * none
1839 **/
1840static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1841 struct ipr_hostrcb *hostrcb)
1842{
1843 int i, num_entries;
1844 struct ipr_hostrcb_type_14_error *error;
1845 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1846 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1847
1848 error = &hostrcb->hcam.u.error.u.type_14_error;
1849
1850 ipr_err_separator;
1851
1852 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1853 error->protection_level,
1854 ioa_cfg->host->host_no,
1855 error->last_func_vset_res_addr.bus,
1856 error->last_func_vset_res_addr.target,
1857 error->last_func_vset_res_addr.lun);
1858
1859 ipr_err_separator;
1860
1861 array_entry = error->array_member;
1862 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001863 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001864
1865 for (i = 0; i < num_entries; i++, array_entry++) {
1866 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1867 continue;
1868
1869 if (be32_to_cpu(error->exposed_mode_adn) == i)
1870 ipr_err("Exposed Array Member %d:\n", i);
1871 else
1872 ipr_err("Array Member %d:\n", i);
1873
1874 ipr_log_ext_vpd(&array_entry->vpd);
1875 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877 "Expected Location");
1878
1879 ipr_err_separator;
1880 }
1881}
1882
1883/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 * ipr_log_array_error - Log an array configuration error.
1885 * @ioa_cfg: ioa config struct
1886 * @hostrcb: hostrcb struct
1887 *
1888 * Return value:
1889 * none
1890 **/
1891static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1892 struct ipr_hostrcb *hostrcb)
1893{
1894 int i;
1895 struct ipr_hostrcb_type_04_error *error;
1896 struct ipr_hostrcb_array_data_entry *array_entry;
1897 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1898
1899 error = &hostrcb->hcam.u.error.u.type_04_error;
1900
1901 ipr_err_separator;
1902
1903 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1904 error->protection_level,
1905 ioa_cfg->host->host_no,
1906 error->last_func_vset_res_addr.bus,
1907 error->last_func_vset_res_addr.target,
1908 error->last_func_vset_res_addr.lun);
1909
1910 ipr_err_separator;
1911
1912 array_entry = error->array_member;
1913
1914 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001915 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 continue;
1917
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001918 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001920 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001923 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001925 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1926 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1927 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928
1929 ipr_err_separator;
1930
1931 if (i == 9)
1932 array_entry = error->array_member2;
1933 else
1934 array_entry++;
1935 }
1936}
1937
1938/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001939 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001940 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001941 * @data: IOA error data
1942 * @len: data length
1943 *
1944 * Return value:
1945 * none
1946 **/
Brian King359d96e2015-06-11 20:45:20 -05001947static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001948{
1949 int i;
1950
1951 if (len == 0)
1952 return;
1953
Brian Kingac719ab2006-11-21 10:28:42 -06001954 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1955 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1956
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001957 for (i = 0; i < len / 4; i += 4) {
1958 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1959 be32_to_cpu(data[i]),
1960 be32_to_cpu(data[i+1]),
1961 be32_to_cpu(data[i+2]),
1962 be32_to_cpu(data[i+3]));
1963 }
1964}
1965
1966/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001967 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1968 * @ioa_cfg: ioa config struct
1969 * @hostrcb: hostrcb struct
1970 *
1971 * Return value:
1972 * none
1973 **/
1974static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1975 struct ipr_hostrcb *hostrcb)
1976{
1977 struct ipr_hostrcb_type_17_error *error;
1978
Wayne Boyer4565e372010-02-19 13:24:07 -08001979 if (ioa_cfg->sis64)
1980 error = &hostrcb->hcam.u.error64.u.type_17_error;
1981 else
1982 error = &hostrcb->hcam.u.error.u.type_17_error;
1983
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001984 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001985 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001986
Brian King8cf093e2007-04-26 16:00:14 -05001987 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1988 be32_to_cpu(hostrcb->hcam.u.error.prc));
1989 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001990 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001991 be32_to_cpu(hostrcb->hcam.length) -
1992 (offsetof(struct ipr_hostrcb_error, u) +
1993 offsetof(struct ipr_hostrcb_type_17_error, data)));
1994}
1995
1996/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001997 * ipr_log_dual_ioa_error - Log a dual adapter error.
1998 * @ioa_cfg: ioa config struct
1999 * @hostrcb: hostrcb struct
2000 *
2001 * Return value:
2002 * none
2003 **/
2004static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2005 struct ipr_hostrcb *hostrcb)
2006{
2007 struct ipr_hostrcb_type_07_error *error;
2008
2009 error = &hostrcb->hcam.u.error.u.type_07_error;
2010 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08002011 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002012
Brian King8cf093e2007-04-26 16:00:14 -05002013 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2014 be32_to_cpu(hostrcb->hcam.u.error.prc));
2015 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06002016 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002017 be32_to_cpu(hostrcb->hcam.length) -
2018 (offsetof(struct ipr_hostrcb_error, u) +
2019 offsetof(struct ipr_hostrcb_type_07_error, data)));
2020}
2021
Brian King49dc6a12006-11-21 10:28:35 -06002022static const struct {
2023 u8 active;
2024 char *desc;
2025} path_active_desc[] = {
2026 { IPR_PATH_NO_INFO, "Path" },
2027 { IPR_PATH_ACTIVE, "Active path" },
2028 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2029};
2030
2031static const struct {
2032 u8 state;
2033 char *desc;
2034} path_state_desc[] = {
2035 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2036 { IPR_PATH_HEALTHY, "is healthy" },
2037 { IPR_PATH_DEGRADED, "is degraded" },
2038 { IPR_PATH_FAILED, "is failed" }
2039};
2040
2041/**
2042 * ipr_log_fabric_path - Log a fabric path error
2043 * @hostrcb: hostrcb struct
2044 * @fabric: fabric descriptor
2045 *
2046 * Return value:
2047 * none
2048 **/
2049static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2050 struct ipr_hostrcb_fabric_desc *fabric)
2051{
2052 int i, j;
2053 u8 path_state = fabric->path_state;
2054 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2055 u8 state = path_state & IPR_PATH_STATE_MASK;
2056
2057 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058 if (path_active_desc[i].active != active)
2059 continue;
2060
2061 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062 if (path_state_desc[j].state != state)
2063 continue;
2064
2065 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2066 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2067 path_active_desc[i].desc, path_state_desc[j].desc,
2068 fabric->ioa_port);
2069 } else if (fabric->cascaded_expander == 0xff) {
2070 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2071 path_active_desc[i].desc, path_state_desc[j].desc,
2072 fabric->ioa_port, fabric->phy);
2073 } else if (fabric->phy == 0xff) {
2074 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2075 path_active_desc[i].desc, path_state_desc[j].desc,
2076 fabric->ioa_port, fabric->cascaded_expander);
2077 } else {
2078 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2079 path_active_desc[i].desc, path_state_desc[j].desc,
2080 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2081 }
2082 return;
2083 }
2084 }
2085
2086 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2087 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2088}
2089
Wayne Boyer4565e372010-02-19 13:24:07 -08002090/**
2091 * ipr_log64_fabric_path - Log a fabric path error
2092 * @hostrcb: hostrcb struct
2093 * @fabric: fabric descriptor
2094 *
2095 * Return value:
2096 * none
2097 **/
2098static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2099 struct ipr_hostrcb64_fabric_desc *fabric)
2100{
2101 int i, j;
2102 u8 path_state = fabric->path_state;
2103 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2104 u8 state = path_state & IPR_PATH_STATE_MASK;
2105 char buffer[IPR_MAX_RES_PATH_LENGTH];
2106
2107 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2108 if (path_active_desc[i].active != active)
2109 continue;
2110
2111 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2112 if (path_state_desc[j].state != state)
2113 continue;
2114
2115 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2116 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002117 ipr_format_res_path(hostrcb->ioa_cfg,
2118 fabric->res_path,
2119 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002120 return;
2121 }
2122 }
2123
2124 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002125 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2126 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002127}
2128
Brian King49dc6a12006-11-21 10:28:35 -06002129static const struct {
2130 u8 type;
2131 char *desc;
2132} path_type_desc[] = {
2133 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2134 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2135 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2136 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2137};
2138
2139static const struct {
2140 u8 status;
2141 char *desc;
2142} path_status_desc[] = {
2143 { IPR_PATH_CFG_NO_PROB, "Functional" },
2144 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2145 { IPR_PATH_CFG_FAILED, "Failed" },
2146 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2147 { IPR_PATH_NOT_DETECTED, "Missing" },
2148 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2149};
2150
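/* Decoded link rate strings, indexed by cfg->link_rate & IPR_PHY_LINK_RATE_MASK. */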
2151static const char *link_rate[] = {
2152 "unknown",
2153 "disabled",
2154 "phy reset problem",
2155 "spinup hold",
2156 "port selector",
2157 "unknown",
2158 "unknown",
2159 "unknown",
2160 "1.5Gbps",
2161 "3.0Gbps",
2162 "unknown",
2163 "unknown",
2164 "unknown",
2165 "unknown",
2166 "unknown",
2167 "unknown"
2168};
2169
2170/**
2171 * ipr_log_path_elem - Log a fabric path element.
2172 * @hostrcb: hostrcb struct
2173 * @cfg: fabric path element struct
2174 *
2175 * Return value:
2176 * none
2177 **/
2178static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2179 struct ipr_hostrcb_config_element *cfg)
2180{
2181 int i, j;
2182 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2183 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2184
2185 if (type == IPR_PATH_CFG_NOT_EXIST)
2186 return;
2187
2188 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2189 if (path_type_desc[i].type != type)
2190 continue;
2191
2192 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2193 if (path_status_desc[j].status != status)
2194 continue;
2195
2196 if (type == IPR_PATH_CFG_IOA_PORT) {
2197 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198 path_status_desc[j].desc, path_type_desc[i].desc,
2199 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2200 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2201 } else {
2202 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2203 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204 path_status_desc[j].desc, path_type_desc[i].desc,
2205 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2206 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2207 } else if (cfg->cascaded_expander == 0xff) {
2208 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2209 "WWN=%08X%08X\n", path_status_desc[j].desc,
2210 path_type_desc[i].desc, cfg->phy,
2211 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2212 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2213 } else if (cfg->phy == 0xff) {
2214 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2215 "WWN=%08X%08X\n", path_status_desc[j].desc,
2216 path_type_desc[i].desc, cfg->cascaded_expander,
2217 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2218 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2219 } else {
2220 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221 "WWN=%08X%08X\n", path_status_desc[j].desc,
2222 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2223 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2224 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2225 }
2226 }
2227 return;
2228 }
2229 }
2230
2231 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2233 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2234 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2235}
2236
2237/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002238 * ipr_log64_path_elem - Log a fabric path element.
2239 * @hostrcb: hostrcb struct
2240 * @cfg: fabric path element struct
2241 *
2242 * Return value:
2243 * none
2244 **/
2245static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2246 struct ipr_hostrcb64_config_element *cfg)
2247{
2248 int i, j;
2249 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2250 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2251 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2252 char buffer[IPR_MAX_RES_PATH_LENGTH];
2253
2254 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2255 return;
2256
2257 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2258 if (path_type_desc[i].type != type)
2259 continue;
2260
2261 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2262 if (path_status_desc[j].status != status)
2263 continue;
2264
2265 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002267 ipr_format_res_path(hostrcb->ioa_cfg,
2268 cfg->res_path, buffer, sizeof(buffer)),
2269 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2270 be32_to_cpu(cfg->wwid[0]),
2271 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002272 return;
2273 }
2274 }
2275 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002277 ipr_format_res_path(hostrcb->ioa_cfg,
2278 cfg->res_path, buffer, sizeof(buffer)),
2279 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2280 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002281}
2282
2283/**
Brian King49dc6a12006-11-21 10:28:35 -06002284 * ipr_log_fabric_error - Log a fabric error.
2285 * @ioa_cfg: ioa config struct
2286 * @hostrcb: hostrcb struct
2287 *
2288 * Return value:
2289 * none
2290 **/
2291static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2292 struct ipr_hostrcb *hostrcb)
2293{
2294 struct ipr_hostrcb_type_20_error *error;
2295 struct ipr_hostrcb_fabric_desc *fabric;
2296 struct ipr_hostrcb_config_element *cfg;
2297 int i, add_len;
2298
2299 error = &hostrcb->hcam.u.error.u.type_20_error;
2300 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2301 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2302
2303 add_len = be32_to_cpu(hostrcb->hcam.length) -
2304 (offsetof(struct ipr_hostrcb_error, u) +
2305 offsetof(struct ipr_hostrcb_type_20_error, desc));
2306
2307 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2308 ipr_log_fabric_path(hostrcb, fabric);
2309 for_each_fabric_cfg(fabric, cfg)
2310 ipr_log_path_elem(hostrcb, cfg);
2311
2312 add_len -= be16_to_cpu(fabric->length);
2313 fabric = (struct ipr_hostrcb_fabric_desc *)
2314 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2315 }
2316
Brian King359d96e2015-06-11 20:45:20 -05002317 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002318}
2319
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002320/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002321 * ipr_log_sis64_array_error - Log a sis64 array error.
2322 * @ioa_cfg: ioa config struct
2323 * @hostrcb: hostrcb struct
2324 *
2325 * Return value:
2326 * none
2327 **/
2328static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329 struct ipr_hostrcb *hostrcb)
2330{
2331 int i, num_entries;
2332 struct ipr_hostrcb_type_24_error *error;
2333 struct ipr_hostrcb64_array_data_entry *array_entry;
2334 char buffer[IPR_MAX_RES_PATH_LENGTH];
2335 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2336
2337 error = &hostrcb->hcam.u.error64.u.type_24_error;
2338
2339 ipr_err_separator;
2340
2341 ipr_err("RAID %s Array Configuration: %s\n",
2342 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002343 ipr_format_res_path(ioa_cfg, error->last_res_path,
2344 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002345
2346 ipr_err_separator;
2347
2348 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002349 num_entries = min_t(u32, error->num_entries,
2350 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002351
2352 for (i = 0; i < num_entries; i++, array_entry++) {
2353
2354 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2355 continue;
2356
2357 if (error->exposed_mode_adn == i)
2358 ipr_err("Exposed Array Member %d:\n", i);
2359 else
2360 ipr_err("Array Member %d:\n", i);
2361
2363 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002364 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002365 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2366 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002367 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002368 ipr_format_res_path(ioa_cfg,
2369 array_entry->expected_res_path,
2370 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002371
2372 ipr_err_separator;
2373 }
2374}
2375
2376/**
2377 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378 * @ioa_cfg: ioa config struct
2379 * @hostrcb: hostrcb struct
2380 *
2381 * Return value:
2382 * none
2383 **/
2384static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2385 struct ipr_hostrcb *hostrcb)
2386{
2387 struct ipr_hostrcb_type_30_error *error;
2388 struct ipr_hostrcb64_fabric_desc *fabric;
2389 struct ipr_hostrcb64_config_element *cfg;
2390 int i, add_len;
2391
2392 error = &hostrcb->hcam.u.error64.u.type_30_error;
2393
2394 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2395 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2396
2397 add_len = be32_to_cpu(hostrcb->hcam.length) -
2398 (offsetof(struct ipr_hostrcb64_error, u) +
2399 offsetof(struct ipr_hostrcb_type_30_error, desc));
2400
2401 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2402 ipr_log64_fabric_path(hostrcb, fabric);
2403 for_each_fabric_cfg(fabric, cfg)
2404 ipr_log64_path_elem(hostrcb, cfg);
2405
2406 add_len -= be16_to_cpu(fabric->length);
2407 fabric = (struct ipr_hostrcb64_fabric_desc *)
2408 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2409 }
2410
Brian King359d96e2015-06-11 20:45:20 -05002411 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Wayne Boyer4565e372010-02-19 13:24:07 -08002412}
2413
2414/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 * ipr_log_generic_error - Log an adapter error.
2416 * @ioa_cfg: ioa config struct
2417 * @hostrcb: hostrcb struct
2418 *
2419 * Return value:
2420 * none
2421 **/
2422static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2423 struct ipr_hostrcb *hostrcb)
2424{
Brian Kingac719ab2006-11-21 10:28:42 -06002425 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002426 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427}
2428
2429/**
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002430 * ipr_log_sis64_device_error - Log a sis64 device error.
2431 * @ioa_cfg: ioa config struct
2432 * @hostrcb: hostrcb struct
2433 *
2434 * Return value:
2435 * none
2436 **/
2437static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2438 struct ipr_hostrcb *hostrcb)
2439{
2440 struct ipr_hostrcb_type_21_error *error;
2441 char buffer[IPR_MAX_RES_PATH_LENGTH];
2442
2443 error = &hostrcb->hcam.u.error64.u.type_21_error;
2444
2445 ipr_err("-----Failing Device Information-----\n");
2446 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2447 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2448 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2449 ipr_err("Device Resource Path: %s\n",
2450 __ipr_format_res_path(error->res_path,
2451 buffer, sizeof(buffer)));
2452 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2453 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2454 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2455 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2456 ipr_err("SCSI Sense Data:\n");
2457 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2458	ipr_err("SCSI Command Descriptor Block:\n");
2459 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2460
2461 ipr_err("Additional IOA Data:\n");
2462 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2463}
2464
2465/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2467 * @ioasc: IOASC
2468 *
2469 * This function will return the index into the ipr_error_table
2470 * for the specified IOASC. If the IOASC is not in the table,
2471 * 0 will be returned, which points to the entry used for unknown errors.
2472 *
2473 * Return value:
2474 * index into the ipr_error_table
2475 **/
2476static u32 ipr_get_error(u32 ioasc)
2477{
2478 int i;
2479
2480 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002481 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 return i;
2483
2484 return 0;
2485}
2486
2487/**
2488 * ipr_handle_log_data - Log an adapter error.
2489 * @ioa_cfg: ioa config struct
2490 * @hostrcb: hostrcb struct
2491 *
2492 * This function logs an adapter error to the system.
2493 *
2494 * Return value:
2495 * none
2496 **/
2497static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2498 struct ipr_hostrcb *hostrcb)
2499{
2500 u32 ioasc;
2501 int error_index;
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002502 struct ipr_hostrcb_type_21_error *error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503
2504 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2505 return;
2506
2507 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2508 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2509
Wayne Boyer4565e372010-02-19 13:24:07 -08002510 if (ioa_cfg->sis64)
2511 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2512 else
2513 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514
Wayne Boyer4565e372010-02-19 13:24:07 -08002515 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2516 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2518 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002519 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 }
2521
2522 error_index = ipr_get_error(ioasc);
2523
2524 if (!ipr_error_table[error_index].log_hcam)
2525 return;
2526
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002527 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2528 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2529 error = &hostrcb->hcam.u.error64.u.type_21_error;
2530
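		/* Byte 2 of the sense data holds the sense key; skip logging
		   Illegal Request errors unless a more verbose log level is set */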
2531 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2532 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2533 return;
2534 }
2535
Brian King49dc6a12006-11-21 10:28:35 -06002536 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537
2538 /* Set indication we have logged an error */
2539 ioa_cfg->errors_logged++;
2540
Brian King933916f2007-03-29 12:43:30 -05002541 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 return;
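	/* Clamp the logged length so we never walk past the end of the raw HCAM buffer */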
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002543 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2544 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545
2546 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 case IPR_HOST_RCB_OVERLAY_ID_2:
2548 ipr_log_cache_error(ioa_cfg, hostrcb);
2549 break;
2550 case IPR_HOST_RCB_OVERLAY_ID_3:
2551 ipr_log_config_error(ioa_cfg, hostrcb);
2552 break;
2553 case IPR_HOST_RCB_OVERLAY_ID_4:
2554 case IPR_HOST_RCB_OVERLAY_ID_6:
2555 ipr_log_array_error(ioa_cfg, hostrcb);
2556 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002557 case IPR_HOST_RCB_OVERLAY_ID_7:
2558 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2559 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002560 case IPR_HOST_RCB_OVERLAY_ID_12:
2561 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2562 break;
2563 case IPR_HOST_RCB_OVERLAY_ID_13:
2564 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2565 break;
2566 case IPR_HOST_RCB_OVERLAY_ID_14:
2567 case IPR_HOST_RCB_OVERLAY_ID_16:
2568 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2569 break;
2570 case IPR_HOST_RCB_OVERLAY_ID_17:
2571 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2572 break;
Brian King49dc6a12006-11-21 10:28:35 -06002573 case IPR_HOST_RCB_OVERLAY_ID_20:
2574 ipr_log_fabric_error(ioa_cfg, hostrcb);
2575 break;
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002576 case IPR_HOST_RCB_OVERLAY_ID_21:
2577 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2578 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002579 case IPR_HOST_RCB_OVERLAY_ID_23:
2580 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2581 break;
2582 case IPR_HOST_RCB_OVERLAY_ID_24:
2583 case IPR_HOST_RCB_OVERLAY_ID_26:
2584 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2585 break;
2586 case IPR_HOST_RCB_OVERLAY_ID_30:
2587 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2588 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002589 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002592 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 break;
2594 }
2595}
2596
Brian Kingafc3f832016-08-24 12:56:51 -05002597static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2598{
2599 struct ipr_hostrcb *hostrcb;
2600
2601 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2602 struct ipr_hostrcb, queue);
2603
2604 if (unlikely(!hostrcb)) {
2605 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2606 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2607 struct ipr_hostrcb, queue);
2608 }
2609
2610 list_del_init(&hostrcb->queue);
2611 return hostrcb;
2612}
2613
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614/**
2615 * ipr_process_error - Op done function for an adapter error log.
2616 * @ipr_cmd: ipr command struct
2617 *
2618 * This function is the op done function for an error log host controlled
2619 * async notification (HCAM) from the adapter. It will log the error and
2620 * send the HCAM back to the adapter.
2621 *
2622 * Return value:
2623 * none
2624 **/
2625static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2626{
2627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2628 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002629 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002630 u32 fd_ioasc;
2631
2632 if (ioa_cfg->sis64)
2633 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2634 else
2635 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636
Brian Kingafc3f832016-08-24 12:56:51 -05002637 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002638 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639
2640 if (!ioasc) {
2641 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002642 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2643 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Brian King4fdd7c72015-03-26 11:23:50 -05002644 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2645 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 dev_err(&ioa_cfg->pdev->dev,
2647 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2648 }
2649
Brian Kingafc3f832016-08-24 12:56:51 -05002650 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
Brian King8a4236a2016-10-13 14:45:24 -05002651 schedule_work(&ioa_cfg->work_q);
Brian Kingafc3f832016-08-24 12:56:51 -05002652 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
Brian Kingafc3f832016-08-24 12:56:51 -05002653
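	/* Return the hostrcb to the adapter so it can report the next async error */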
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2655}
2656
2657/**
2658 * ipr_timeout - An internally generated op has timed out.
2659 * @ipr_cmd: ipr command struct
2660 *
2661 * This function blocks host requests and initiates an
2662 * adapter reset.
2663 *
2664 * Return value:
2665 * none
2666 **/
2667static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2668{
2669 unsigned long lock_flags = 0;
2670 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2671
2672 ENTER;
2673 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2674
2675 ioa_cfg->errors_logged++;
2676 dev_err(&ioa_cfg->pdev->dev,
2677 "Adapter being reset due to command timeout.\n");
2678
2679 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2680 ioa_cfg->sdt_state = GET_DUMP;
2681
2682 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2683 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2684
2685 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2686 LEAVE;
2687}
2688
2689/**
2690 * ipr_oper_timeout - Adapter timed out transitioning to operational
2691 * @ipr_cmd: ipr command struct
2692 *
2693 * This function blocks host requests and initiates an
2694 * adapter reset.
2695 *
2696 * Return value:
2697 * none
2698 **/
2699static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2700{
2701 unsigned long lock_flags = 0;
2702 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2703
2704 ENTER;
2705 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2706
2707 ioa_cfg->errors_logged++;
2708 dev_err(&ioa_cfg->pdev->dev,
2709 "Adapter timed out transitioning to operational.\n");
2710
2711 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2712 ioa_cfg->sdt_state = GET_DUMP;
2713
2714 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2715 if (ipr_fastfail)
2716 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2717 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2718 }
2719
2720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721 LEAVE;
2722}
2723
2724/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 * ipr_find_ses_entry - Find matching SES in SES table
2726 * @res: resource entry struct of SES
2727 *
2728 * Return value:
2729 * pointer to SES table entry / NULL on failure
2730 **/
2731static const struct ipr_ses_table_entry *
2732ipr_find_ses_entry(struct ipr_resource_entry *res)
2733{
2734 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002735 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2737
2738 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2739 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2740 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002741 vpids = &res->std_inq_data.vpids;
2742 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 matches++;
2744 else
2745 break;
2746 } else
2747 matches++;
2748 }
2749
2750 if (matches == IPR_PROD_ID_LEN)
2751 return ste;
2752 }
2753
2754 return NULL;
2755}
2756
2757/**
2758 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2759 * @ioa_cfg: ioa config struct
2760 * @bus: SCSI bus
2761 * @bus_width: bus width
2762 *
2763 * Return value:
2764 * SCSI bus speed in units of 100KHz, e.g. 1600 means 160 MHz
2765 * For a 2-byte wide SCSI bus, the maximum data rate in MB/sec is
2766 * twice the bus speed in MHz (e.g. a wide-enabled bus running at
2767 * 160 MHz can transfer at most 320 MB/sec).
2768 **/
2769static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2770{
2771 struct ipr_resource_entry *res;
2772 const struct ipr_ses_table_entry *ste;
2773 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2774
2775 /* Loop through each config table entry in the config table buffer */
2776 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002777 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 continue;
2779
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002780 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 continue;
2782
2783 if (!(ste = ipr_find_ses_entry(res)))
2784 continue;
2785
2786 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2787 }
2788
2789 return max_xfer_rate;
2790}
2791
2792/**
2793 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2794 * @ioa_cfg: ioa config struct
2795 * @max_delay: max delay in micro-seconds to wait
2796 *
2797 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2798 *
2799 * Return value:
2800 * 0 on success / other on failure
2801 **/
2802static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2803{
2804 volatile u32 pcii_reg;
2805 int delay = 1;
2806
2807 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2808 while (delay < max_delay) {
2809 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2810
2811 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2812 return 0;
2813
2814 /* udelay cannot be used if delay is more than a few milliseconds */
2815 if ((delay / 1000) > MAX_UDELAY_MS)
2816 mdelay(delay / 1000);
2817 else
2818 udelay(delay);
2819
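		/* Exponential backoff: double the poll interval each pass until max_delay */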
2820 delay += delay;
2821 }
2822 return -EIO;
2823}
2824
2825/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002826 * ipr_get_sis64_dump_data_section - Dump IOA memory
2827 * @ioa_cfg: ioa config struct
2828 * @start_addr: adapter address to dump
2829 * @dest: destination kernel buffer
2830 * @length_in_words: length to dump in 4 byte words
2831 *
2832 * Return value:
2833 * 0 on success
2834 **/
2835static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2836 u32 start_addr,
2837 __be32 *dest, u32 length_in_words)
2838{
2839 int i;
2840
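	/* Each word is fetched by writing its address to the dump address
	   register and reading the result back from the dump data register */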
2841 for (i = 0; i < length_in_words; i++) {
2842 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2843 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2844 dest++;
2845 }
2846
2847 return 0;
2848}
2849
2850/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851 * ipr_get_ldump_data_section - Dump IOA memory
2852 * @ioa_cfg: ioa config struct
2853 * @start_addr: adapter address to dump
2854 * @dest: destination kernel buffer
2855 * @length_in_words: length to dump in 4 byte words
2856 *
2857 * Return value:
2858 * 0 on success / -EIO on failure
2859 **/
2860static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2861 u32 start_addr,
2862 __be32 *dest, u32 length_in_words)
2863{
2864 volatile u32 temp_pcii_reg;
2865 int i, delay = 0;
2866
Wayne Boyerdcbad002010-02-19 13:24:14 -08002867 if (ioa_cfg->sis64)
2868 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2869 dest, length_in_words);
2870
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 /* Write IOA interrupt reg starting LDUMP state */
2872 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002873 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874
2875 /* Wait for IO debug acknowledge */
2876 if (ipr_wait_iodbg_ack(ioa_cfg,
2877 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2878 dev_err(&ioa_cfg->pdev->dev,
2879 "IOA dump long data transfer timeout\n");
2880 return -EIO;
2881 }
2882
2883 /* Signal LDUMP interlocked - clear IO debug ack */
2884 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2885 ioa_cfg->regs.clr_interrupt_reg);
2886
2887 /* Write Mailbox with starting address */
2888 writel(start_addr, ioa_cfg->ioa_mailbox);
2889
2890 /* Signal address valid - clear IOA Reset alert */
2891 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002892 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893
2894 for (i = 0; i < length_in_words; i++) {
2895 /* Wait for IO debug acknowledge */
2896 if (ipr_wait_iodbg_ack(ioa_cfg,
2897 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2898 dev_err(&ioa_cfg->pdev->dev,
2899 "IOA dump short data transfer timeout\n");
2900 return -EIO;
2901 }
2902
2903 /* Read data from mailbox and increment destination pointer */
2904 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2905 dest++;
2906
2907 /* For all but the last word of data, signal data received */
2908 if (i < (length_in_words - 1)) {
2909 /* Signal dump data received - Clear IO debug Ack */
2910 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2911 ioa_cfg->regs.clr_interrupt_reg);
2912 }
2913 }
2914
2915 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2916 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002917 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918
2919 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002920 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921
2922 /* Signal dump data received - Clear IO debug Ack */
2923 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2924 ioa_cfg->regs.clr_interrupt_reg);
2925
2926 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2927 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2928 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002929 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930
2931 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2932 return 0;
2933
2934 udelay(10);
2935 delay += 10;
2936 }
2937
2938 return 0;
2939}
2940
2941#ifdef CONFIG_SCSI_IPR_DUMP
2942/**
2943 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2944 * @ioa_cfg: ioa config struct
2945 * @pci_address: adapter address
2946 * @length: length of data to copy
2947 *
2948 * Copy data from PCI adapter to kernel buffer.
2949 * Note: length MUST be a 4 byte multiple
2950 * Return value:
2951 * 0 on success / other on failure
2952 **/
2953static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2954 unsigned long pci_address, u32 length)
2955{
2956 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002957 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958 __be32 *page;
2959 unsigned long lock_flags = 0;
2960 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2961
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002962 if (ioa_cfg->sis64)
2963 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2964 else
2965 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2966
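	/* Copy the dump a chunk at a time, taking the host lock around each
	   fetch from the adapter and yielding the CPU between chunks */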
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002968 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 if (ioa_dump->page_offset >= PAGE_SIZE ||
2970 ioa_dump->page_offset == 0) {
2971 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2972
2973 if (!page) {
2974 ipr_trace;
2975 return bytes_copied;
2976 }
2977
2978 ioa_dump->page_offset = 0;
2979 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2980 ioa_dump->next_page_index++;
2981 } else
2982 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2983
2984 rem_len = length - bytes_copied;
2985 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2986 cur_len = min(rem_len, rem_page_len);
2987
2988 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2989 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2990 rc = -EIO;
2991 } else {
2992 rc = ipr_get_ldump_data_section(ioa_cfg,
2993 pci_address + bytes_copied,
2994 &page[ioa_dump->page_offset / 4],
2995 (cur_len / sizeof(u32)));
2996 }
2997 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2998
2999 if (!rc) {
3000 ioa_dump->page_offset += cur_len;
3001 bytes_copied += cur_len;
3002 } else {
3003 ipr_trace;
3004 break;
3005 }
3006 schedule();
3007 }
3008
3009 return bytes_copied;
3010}
3011
3012/**
3013 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3014 * @hdr: dump entry header struct
3015 *
3016 * Return value:
3017 * nothing
3018 **/
3019static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3020{
3021 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3022 hdr->num_elems = 1;
3023 hdr->offset = sizeof(*hdr);
3024 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3025}
3026
3027/**
3028 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3029 * @ioa_cfg: ioa config struct
3030 * @driver_dump: driver dump struct
3031 *
3032 * Return value:
3033 * nothing
3034 **/
3035static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3036 struct ipr_driver_dump *driver_dump)
3037{
3038 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3039
3040 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3041 driver_dump->ioa_type_entry.hdr.len =
3042 sizeof(struct ipr_dump_ioa_type_entry) -
3043 sizeof(struct ipr_dump_entry_header);
3044 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3045 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3046 driver_dump->ioa_type_entry.type = ioa_cfg->type;
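	/* Pack major release, card type, and the two minor release bytes into one 32-bit firmware version word */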
3047 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3048 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3049 ucode_vpd->minor_release[1];
3050 driver_dump->hdr.num_entries++;
3051}
3052
3053/**
3054 * ipr_dump_version_data - Fill in the driver version in the dump.
3055 * @ioa_cfg: ioa config struct
3056 * @driver_dump: driver dump struct
3057 *
3058 * Return value:
3059 * nothing
3060 **/
3061static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3062 struct ipr_driver_dump *driver_dump)
3063{
3064 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3065 driver_dump->version_entry.hdr.len =
3066 sizeof(struct ipr_dump_version_entry) -
3067 sizeof(struct ipr_dump_entry_header);
3068 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3069 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3070 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3071 driver_dump->hdr.num_entries++;
3072}
3073
3074/**
3075 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3076 * @ioa_cfg: ioa config struct
3077 * @driver_dump: driver dump struct
3078 *
3079 * Return value:
3080 * nothing
3081 **/
3082static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3083 struct ipr_driver_dump *driver_dump)
3084{
3085 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3086 driver_dump->trace_entry.hdr.len =
3087 sizeof(struct ipr_dump_trace_entry) -
3088 sizeof(struct ipr_dump_entry_header);
3089 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3090 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3091 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3092 driver_dump->hdr.num_entries++;
3093}
3094
3095/**
3096 * ipr_dump_location_data - Fill in the IOA location in the dump.
3097 * @ioa_cfg: ioa config struct
3098 * @driver_dump: driver dump struct
3099 *
3100 * Return value:
3101 * nothing
3102 **/
3103static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3104 struct ipr_driver_dump *driver_dump)
3105{
3106 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3107 driver_dump->location_entry.hdr.len =
3108 sizeof(struct ipr_dump_location_entry) -
3109 sizeof(struct ipr_dump_entry_header);
3110 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3111 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01003112 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113 driver_dump->hdr.num_entries++;
3114}
3115
3116/**
3117 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3118 * @ioa_cfg: ioa config struct
3119 * @dump: dump struct
3120 *
3121 * Return value:
3122 * nothing
3123 **/
3124static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3125{
3126 unsigned long start_addr, sdt_word;
3127 unsigned long lock_flags = 0;
3128 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3129 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003130 u32 num_entries, max_num_entries, start_off, end_off;
3131 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08003133 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134 int i;
3135
3136 ENTER;
3137
3138 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3139
Brian King41e9a692011-09-21 08:51:11 -05003140 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142 return;
3143 }
3144
Wayne Boyer110def82010-11-04 09:36:16 -07003145 if (ioa_cfg->sis64) {
3146 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3147 ssleep(IPR_DUMP_DELAY_SECONDS);
3148 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3149 }
3150
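	/* The mailbox register holds the address of the adapter's smart dump table */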
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 start_addr = readl(ioa_cfg->ioa_mailbox);
3152
Wayne Boyerdcbad002010-02-19 13:24:14 -08003153 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154 dev_err(&ioa_cfg->pdev->dev,
3155 "Invalid dump table format: %lx\n", start_addr);
3156 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3157 return;
3158 }
3159
3160 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3161
3162 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3163
3164 /* Initialize the overall dump header */
3165 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3166 driver_dump->hdr.num_entries = 1;
3167 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3168 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3169 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3170 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3171
3172 ipr_dump_version_data(ioa_cfg, driver_dump);
3173 ipr_dump_location_data(ioa_cfg, driver_dump);
3174 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3175 ipr_dump_trace_data(ioa_cfg, driver_dump);
3176
3177 /* Update dump_header */
3178 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3179
3180 /* IOA Dump entry */
3181 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182 ioa_dump->hdr.len = 0;
3183 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3184 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3185
3186 /* First entries in sdt are actually a list of dump addresses and
3187 lengths to gather the real dump data. sdt represents the pointer
3188 to the ioa generated dump table. Dump data will be extracted based
3189 on entries in this table */
3190 sdt = &ioa_dump->sdt;
3191
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003192 if (ioa_cfg->sis64) {
3193 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3194 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3195 } else {
3196 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3197 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3198 }
3199
3200 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3201 (max_num_entries * sizeof(struct ipr_sdt_entry));
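	/* Read the SDT header plus the full entry array from the adapter */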
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003203 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204
3205 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003206 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3207 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208 dev_err(&ioa_cfg->pdev->dev,
3209 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3210 rc, be32_to_cpu(sdt->hdr.state));
3211 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3212 ioa_cfg->sdt_state = DUMP_OBTAINED;
3213 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3214 return;
3215 }
3216
3217 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3218
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003219 if (num_entries > max_num_entries)
3220 num_entries = max_num_entries;
3221
3222 /* Update dump length to the actual data to be copied */
3223 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3224 if (ioa_cfg->sis64)
3225 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3226 else
3227 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228
3229 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230
3231 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003232 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3234 break;
3235 }
3236
3237 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003238 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3239 if (ioa_cfg->sis64)
3240 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3241 else {
3242 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3243 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244
Wayne Boyerdcbad002010-02-19 13:24:14 -08003245 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3246 bytes_to_copy = end_off - start_off;
3247 else
3248 valid = 0;
3249 }
3250 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003251 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003252 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3253 continue;
3254 }
3255
3256 /* Copy data from adapter to driver buffers */
3257 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3258 bytes_to_copy);
3259
3260 ioa_dump->hdr.len += bytes_copied;
3261
3262 if (bytes_copied != bytes_to_copy) {
3263 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3264 break;
3265 }
3266 }
3267 }
3268 }
3269
3270 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3271
3272 /* Update dump_header */
3273 driver_dump->hdr.len += ioa_dump->hdr.len;
3274 wmb();
3275 ioa_cfg->sdt_state = DUMP_OBTAINED;
3276 LEAVE;
3277}
3278
3279#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003280#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281#endif
3282
3283/**
3284 * ipr_release_dump - Free adapter dump memory
3285 * @kref: kref struct
3286 *
3287 * Return value:
3288 * nothing
3289 **/
3290static void ipr_release_dump(struct kref *kref)
3291{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003292 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3294 unsigned long lock_flags = 0;
3295 int i;
3296
3297 ENTER;
3298 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3299 ioa_cfg->dump = NULL;
3300 ioa_cfg->sdt_state = INACTIVE;
3301 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3302
3303 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3304 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3305
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003306 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 kfree(dump);
3308 LEAVE;
3309}
3310
3311/**
3312 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003313 * @work: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003314 *
3315 * Called at task level from a work thread. This function takes care
3316 * of adding and removing devices from the mid-layer as configuration
3317 * changes are detected by the adapter.
3318 *
3319 * Return value:
3320 * nothing
3321 **/
David Howellsc4028952006-11-22 14:57:56 +00003322static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323{
3324 unsigned long lock_flags;
3325 struct ipr_resource_entry *res;
3326 struct scsi_device *sdev;
3327 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003328 struct ipr_ioa_cfg *ioa_cfg =
3329 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 u8 bus, target, lun;
3331 int did_work;
3332
3333 ENTER;
3334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3335
Brian King41e9a692011-09-21 08:51:11 -05003336 if (ioa_cfg->sdt_state == READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 dump = ioa_cfg->dump;
3338 if (!dump) {
3339 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3340 return;
3341 }
3342 kref_get(&dump->kref);
3343 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3344 ipr_get_ioa_dump(ioa_cfg, dump);
3345 kref_put(&dump->kref, ipr_release_dump);
3346
3347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King4c647e92011-10-15 09:08:56 -05003348 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3350 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3351 return;
3352 }
3353
Brian Kingb195d5e2016-07-15 14:48:03 -05003354 if (!ioa_cfg->scan_enabled) {
3355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356 return;
3357 }
3358
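	/* scsi_remove_device() and scsi_add_device() can sleep, so the host lock
	   is dropped around each call and the resource list is rescanned afterwards */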
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359restart:
3360 do {
3361 did_work = 0;
Brian Kingf688f962014-12-02 12:47:37 -06003362 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3364 return;
3365 }
3366
3367 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3368 if (res->del_from_ml && res->sdev) {
3369 did_work = 1;
3370 sdev = res->sdev;
3371 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003372 if (!res->add_to_ml)
3373 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3374 else
3375 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377 scsi_remove_device(sdev);
3378 scsi_device_put(sdev);
3379 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3380 }
3381 break;
3382 }
3383 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003384 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385
3386 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3387 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003388 bus = res->bus;
3389 target = res->target;
3390 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003391 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3393 scsi_add_device(ioa_cfg->host, bus, target, lun);
3394 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3395 goto restart;
3396 }
3397 }
3398
Brian Kingf688f962014-12-02 12:47:37 -06003399 ioa_cfg->scan_done = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003401 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 LEAVE;
3403}
3404
3405#ifdef CONFIG_SCSI_IPR_TRACE
3406/**
3407 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003408 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003410 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 * @buf: buffer
3412 * @off: offset
3413 * @count: buffer size
3414 *
3415 * Return value:
3416 * number of bytes printed to buffer
3417 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003418static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003419 struct bin_attribute *bin_attr,
3420 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421{
Tony Jonesee959b02008-02-22 00:13:36 +01003422 struct device *dev = container_of(kobj, struct device, kobj);
3423 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3425 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003426 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427
3428 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003429 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3430 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003432
3433 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434}
3435
3436static struct bin_attribute ipr_trace_attr = {
3437 .attr = {
3438 .name = "trace",
3439 .mode = S_IRUGO,
3440 },
3441 .size = 0,
3442 .read = ipr_read_trace,
3443};
3444#endif
3445
3446/**
3447 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003448 * @dev: class device struct
3449 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 *
3451 * Return value:
3452 * number of bytes printed to buffer
3453 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003454static ssize_t ipr_show_fw_version(struct device *dev,
3455 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456{
Tony Jonesee959b02008-02-22 00:13:36 +01003457 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3459 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3460 unsigned long lock_flags = 0;
3461 int len;
3462
3463 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3464 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3465 ucode_vpd->major_release, ucode_vpd->card_type,
3466 ucode_vpd->minor_release[0],
3467 ucode_vpd->minor_release[1]);
3468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3469 return len;
3470}
3471
Tony Jonesee959b02008-02-22 00:13:36 +01003472static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 .attr = {
3474 .name = "fw_version",
3475 .mode = S_IRUGO,
3476 },
3477 .show = ipr_show_fw_version,
3478};
3479
3480/**
3481 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003482 * @dev: class device struct
3483 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 *
3485 * Return value:
3486 * number of bytes printed to buffer
3487 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003488static ssize_t ipr_show_log_level(struct device *dev,
3489 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490{
Tony Jonesee959b02008-02-22 00:13:36 +01003491 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3493 unsigned long lock_flags = 0;
3494 int len;
3495
3496 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3497 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3499 return len;
3500}
3501
3502/**
3503 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003504 * @dev: class device struct
3505 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506 *
3507 * Return value:
3508 * number of bytes consumed from the buffer
3509 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003510static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003511 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 const char *buf, size_t count)
3513{
Tony Jonesee959b02008-02-22 00:13:36 +01003514 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3516 unsigned long lock_flags = 0;
3517
3518 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3519 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3520 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3521 return strlen(buf);
3522}
3523
Tony Jonesee959b02008-02-22 00:13:36 +01003524static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525 .attr = {
3526 .name = "log_level",
3527 .mode = S_IRUGO | S_IWUSR,
3528 },
3529 .show = ipr_show_log_level,
3530 .store = ipr_store_log_level
3531};
3532
3533/**
3534 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003535 * @dev: device struct
3536 * @buf: buffer
3537 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 *
3539 * This function will reset the adapter and wait a reasonable
3540 * amount of time for any errors that the adapter might log.
3541 *
3542 * Return value:
3543 * count on success / other on failure
3544 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003545static ssize_t ipr_store_diagnostics(struct device *dev,
3546 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547 const char *buf, size_t count)
3548{
Tony Jonesee959b02008-02-22 00:13:36 +01003549 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3551 unsigned long lock_flags = 0;
3552 int rc = count;
3553
3554 if (!capable(CAP_SYS_ADMIN))
3555 return -EACCES;
3556
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003558 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003559 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3560 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3561 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3562 }
3563
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 ioa_cfg->errors_logged = 0;
3565 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3566
3567 if (ioa_cfg->in_reset_reload) {
3568 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3569 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3570
3571 /* Wait for a second for any errors to be logged */
3572 msleep(1000);
3573 } else {
3574 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3575 return -EIO;
3576 }
3577
3578 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3579 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3580 rc = -EIO;
3581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3582
3583 return rc;
3584}
3585
Tony Jonesee959b02008-02-22 00:13:36 +01003586static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587 .attr = {
3588 .name = "run_diagnostics",
3589 .mode = S_IWUSR,
3590 },
3591 .store = ipr_store_diagnostics
3592};
3593
3594/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003595 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003596 * @dev: device struct
3597 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003598 *
3599 * Return value:
3600 * number of bytes printed to buffer
3601 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003602static ssize_t ipr_show_adapter_state(struct device *dev,
3603 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003604{
Tony Jonesee959b02008-02-22 00:13:36 +01003605 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003606 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3607 unsigned long lock_flags = 0;
3608 int len;
3609
3610 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003611 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003612 len = snprintf(buf, PAGE_SIZE, "offline\n");
3613 else
3614 len = snprintf(buf, PAGE_SIZE, "online\n");
3615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3616 return len;
3617}
3618
3619/**
3620 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003621 * @dev: device struct
3622 * @buf: buffer
3623 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003624 *
3625 * This function will change the adapter's state.
3626 *
3627 * Return value:
3628 * count on success / other on failure
3629 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003630static ssize_t ipr_store_adapter_state(struct device *dev,
3631 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003632 const char *buf, size_t count)
3633{
Tony Jonesee959b02008-02-22 00:13:36 +01003634 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003635 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3636 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003637 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003638
3639 if (!capable(CAP_SYS_ADMIN))
3640 return -EACCES;
3641
3642 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003643 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3644 !strncmp(buf, "online", 6)) {
3645 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3646 spin_lock(&ioa_cfg->hrrq[i]._lock);
3647 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3648 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3649 }
3650 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003651 ioa_cfg->reset_retries = 0;
3652 ioa_cfg->in_ioa_bringdown = 0;
3653 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3654 }
3655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3656 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3657
3658 return result;
3659}
3660
Tony Jonesee959b02008-02-22 00:13:36 +01003661static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003662 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003663 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003664 .mode = S_IRUGO | S_IWUSR,
3665 },
3666 .show = ipr_show_adapter_state,
3667 .store = ipr_store_adapter_state
3668};
3669
3670/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003672 * @dev: device struct
3673 * @buf: buffer
3674 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675 *
3676 * This function will reset the adapter.
3677 *
3678 * Return value:
3679 * count on success / other on failure
3680 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003681static ssize_t ipr_store_reset_adapter(struct device *dev,
3682 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683 const char *buf, size_t count)
3684{
Tony Jonesee959b02008-02-22 00:13:36 +01003685 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3687 unsigned long lock_flags;
3688 int result = count;
3689
3690 if (!capable(CAP_SYS_ADMIN))
3691 return -EACCES;
3692
3693 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3694 if (!ioa_cfg->in_reset_reload)
3695 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3696 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3697 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3698
3699 return result;
3700}
3701
Tony Jonesee959b02008-02-22 00:13:36 +01003702static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 .attr = {
3704 .name = "reset_host",
3705 .mode = S_IWUSR,
3706 },
3707 .store = ipr_store_reset_adapter
3708};
3709
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003710static int ipr_iopoll(struct irq_poll *iop, int budget);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003711 /**
3712 * ipr_show_iopoll_weight - Show ipr polling mode
3713 * @dev: class device struct
3714 * @buf: buffer
3715 *
3716 * Return value:
3717 * number of bytes printed to buffer
3718 **/
3719static ssize_t ipr_show_iopoll_weight(struct device *dev,
3720 struct device_attribute *attr, char *buf)
3721{
3722 struct Scsi_Host *shost = class_to_shost(dev);
3723 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3724 unsigned long lock_flags = 0;
3725 int len;
3726
3727 spin_lock_irqsave(shost->host_lock, lock_flags);
3728 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3729 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3730
3731 return len;
3732}
3733
3734/**
3735 * ipr_store_iopoll_weight - Change the adapter's polling mode
3736 * @dev: class device struct
3737 * @buf: buffer
3738 *
3739 * Return value:
3740 * number of bytes consumed from the buffer / -EINVAL on failure
3741 **/
3742static ssize_t ipr_store_iopoll_weight(struct device *dev,
3743 struct device_attribute *attr,
3744 const char *buf, size_t count)
3745{
3746 struct Scsi_Host *shost = class_to_shost(dev);
3747 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3748 unsigned long user_iopoll_weight;
3749 unsigned long lock_flags = 0;
3750 int i;
3751
3752 if (!ioa_cfg->sis64) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003753 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003754 return -EINVAL;
3755 }
3756 if (kstrtoul(buf, 10, &user_iopoll_weight))
3757 return -EINVAL;
3758
3759 if (user_iopoll_weight > 256) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003760 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003761 return -EINVAL;
3762 }
3763
3764 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003765 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003766 return strlen(buf);
3767 }
3768
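	/* Quiesce any existing polling on the secondary HRRQs before changing the weight */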
Jens Axboe89f8b332014-03-13 09:38:42 -06003769 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003770 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003771 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003772 }
3773
3774 spin_lock_irqsave(shost->host_lock, lock_flags);
3775 ioa_cfg->iopoll_weight = user_iopoll_weight;
Jens Axboe89f8b332014-03-13 09:38:42 -06003776 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003777 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003778 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003779 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003780 }
3781 }
3782 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3783
3784 return strlen(buf);
3785}
3786
3787static struct device_attribute ipr_iopoll_weight_attr = {
3788 .attr = {
3789 .name = "iopoll_weight",
3790 .mode = S_IRUGO | S_IWUSR,
3791 },
3792 .show = ipr_show_iopoll_weight,
3793 .store = ipr_store_iopoll_weight
3794};
3795
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796/**
3797 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3798 * @buf_len: buffer length
3799 *
3800 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3801 * list to use for microcode download
3802 *
3803 * Return value:
3804 * pointer to sglist / NULL on failure
3805 **/
3806static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3807{
3808 int sg_size, order, bsize_elem, num_elem, i, j;
3809 struct ipr_sglist *sglist;
3810 struct scatterlist *scatterlist;
3811 struct page *page;
3812
3813 /* Get the minimum size per scatter/gather element */
3814 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3815
3816 /* Get the actual size per element */
3817 order = get_order(sg_size);
3818
3819 /* Determine the actual number of bytes per element */
3820 bsize_elem = PAGE_SIZE * (1 << order);
3821
3822 /* Determine the actual number of sg entries needed */
3823 if (buf_len % bsize_elem)
3824 num_elem = (buf_len / bsize_elem) + 1;
3825 else
3826 num_elem = buf_len / bsize_elem;
3827
3828 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003829 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 (sizeof(struct scatterlist) * (num_elem - 1)),
3831 GFP_KERNEL);
3832
3833 if (sglist == NULL) {
3834 ipr_trace;
3835 return NULL;
3836 }
3837
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 scatterlist = sglist->scatterlist;
Jens Axboe45711f12007-10-22 21:19:53 +02003839 sg_init_table(scatterlist, num_elem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840
3841 sglist->order = order;
3842 sglist->num_sg = num_elem;
3843
3844 /* Allocate a bunch of sg elements */
3845 for (i = 0; i < num_elem; i++) {
3846 page = alloc_pages(GFP_KERNEL, order);
3847 if (!page) {
3848 ipr_trace;
3849
3850 /* Free up what we already allocated */
3851 for (j = i - 1; j >= 0; j--)
Jens Axboe45711f12007-10-22 21:19:53 +02003852 __free_pages(sg_page(&scatterlist[j]), order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853 kfree(sglist);
3854 return NULL;
3855 }
3856
Jens Axboe642f1492007-10-24 11:20:47 +02003857 sg_set_page(&scatterlist[i], page, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 }
3859
3860 return sglist;
3861}
3862
3863/**
3864 * ipr_free_ucode_buffer - Frees a microcode download buffer
3865 * @sglist: scatter/gather list pointer
3866 *
3867 * Free a DMA'able ucode download buffer previously allocated with
3868 * ipr_alloc_ucode_buffer
3869 *
3870 * Return value:
3871 * nothing
3872 **/
3873static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3874{
3875 int i;
3876
3877 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003878 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879
3880 kfree(sglist);
3881}
3882
3883/**
3884 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3885 * @sglist: scatter/gather list pointer
3886 * @buffer: buffer pointer
3887 * @len: buffer length
3888 *
3889 * Copy a microcode image from a user buffer into a buffer allocated by
3890 * ipr_alloc_ucode_buffer
3891 *
3892 * Return value:
3893 * 0 on success / other on failure
3894 **/
3895static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3896 u8 *buffer, u32 len)
3897{
3898 int bsize_elem, i, result = 0;
3899 struct scatterlist *scatterlist;
3900 void *kaddr;
3901
3902 /* Determine the actual number of bytes per element */
3903 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3904
3905 scatterlist = sglist->scatterlist;
3906
3907 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003908 struct page *page = sg_page(&scatterlist[i]);
3909
3910 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003911 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003912 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003913
3914 scatterlist[i].length = bsize_elem;
3915
3916 if (result != 0) {
3917 ipr_trace;
3918 return result;
3919 }
3920 }
3921
3922 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003923 struct page *page = sg_page(&scatterlist[i]);
3924
3925 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003927 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928
3929 scatterlist[i].length = len % bsize_elem;
3930 }
3931
3932 sglist->buffer_len = len;
3933 return result;
3934}
3935
3936/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003937 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3938 * @ipr_cmd: ipr command struct
3939 * @sglist: scatter/gather list
3940 *
3941 * Builds a microcode download IOA data list (IOADL).
3942 *
3943 **/
3944static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3945 struct ipr_sglist *sglist)
3946{
3947 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3948 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3949 struct scatterlist *scatterlist = sglist->scatterlist;
3950 int i;
3951
3952 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3953 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3954 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3955
3956 ioarcb->ioadl_len =
3957 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3958 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3959 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3960 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3961 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3962 }
3963
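	/* Mark the final descriptor so the adapter knows where the IOADL ends */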
3964 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3965}
3966
3967/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003968 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969 * @ipr_cmd: ipr command struct
3970 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003972 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003975static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3976 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003979 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003980 struct scatterlist *scatterlist = sglist->scatterlist;
3981 int i;
3982
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003983 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003985 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3986
3987 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3989
3990 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3991 ioadl[i].flags_and_data_len =
3992 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3993 ioadl[i].address =
3994 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3995 }
3996
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003997 ioadl[i-1].flags_and_data_len |=
3998 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3999}
4000
4001/**
4002 * ipr_update_ioa_ucode - Update IOA's microcode
4003 * @ioa_cfg: ioa config struct
4004 * @sglist: scatter/gather list
4005 *
4006 * Initiate an adapter reset to update the IOA's microcode
4007 *
4008 * Return value:
4009 * 0 on success / -EIO on failure
4010 **/
4011static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4012 struct ipr_sglist *sglist)
4013{
4014 unsigned long lock_flags;
4015
4016 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004017 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05004018 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4019 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4020 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4021 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004022
4023 if (ioa_cfg->ucode_sglist) {
4024 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4025 dev_err(&ioa_cfg->pdev->dev,
4026 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027 return -EIO;
4028 }
4029
Anton Blanchardd73341b2014-10-30 17:27:08 -05004030 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4031 sglist->scatterlist, sglist->num_sg,
4032 DMA_TO_DEVICE);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004033
4034 if (!sglist->num_dma_sg) {
4035 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4036 dev_err(&ioa_cfg->pdev->dev,
4037 "Failed to map microcode download buffer!\n");
4038 return -EIO;
4039 }
4040
4041 ioa_cfg->ucode_sglist = sglist;
4042 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4045
4046 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4047 ioa_cfg->ucode_sglist = NULL;
4048 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 return 0;
4050}
4051
4052/**
4053 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev: device struct
 * @attr: device attribute structure
4055 * @buf: buffer
4056 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057 *
4058 * This function will update the firmware on the adapter.
4059 *
4060 * Return value:
4061 * count on success / other on failure
4062 **/
Tony Jonesee959b02008-02-22 00:13:36 +01004063static ssize_t ipr_store_update_fw(struct device *dev,
4064 struct device_attribute *attr,
4065 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066{
Tony Jonesee959b02008-02-22 00:13:36 +01004067 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4069 struct ipr_ucode_image_header *image_hdr;
4070 const struct firmware *fw_entry;
4071 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072 char fname[100];
4073 char *src;
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004074 char *endline;
Insu Yund63c7dd2016-01-06 12:44:01 -05004075 int result, dnld_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076
4077 if (!capable(CAP_SYS_ADMIN))
4078 return -EACCES;
4079
Insu Yund63c7dd2016-01-06 12:44:01 -05004080 snprintf(fname, sizeof(fname), "%s", buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004082 endline = strchr(fname, '\n');
4083 if (endline)
4084 *endline = '\0';
4085
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004086 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4088 return -EIO;
4089 }
4090
4091 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4092
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4094 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4095 sglist = ipr_alloc_ucode_buffer(dnld_size);
4096
4097 if (!sglist) {
4098 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4099 release_firmware(fw_entry);
4100 return -ENOMEM;
4101 }
4102
4103 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4104
4105 if (result) {
4106 dev_err(&ioa_cfg->pdev->dev,
4107 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004108 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109 }
4110
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07004111 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4112
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004113 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004115 if (!result)
4116 result = count;
4117out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118 ipr_free_ucode_buffer(sglist);
4119 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004120 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121}
4122
Tony Jonesee959b02008-02-22 00:13:36 +01004123static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124 .attr = {
4125 .name = "update_fw",
4126 .mode = S_IWUSR,
4127 },
4128 .store = ipr_store_update_fw
4129};
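
/*
 * Usage sketch (illustrative only; the host number is an assumption):
 *
 *	# echo <microcode file name> > /sys/class/scsi_host/host0/update_fw
 *
 * The name is handed to request_firmware(), so it is resolved against
 * the normal firmware search path (typically /lib/firmware).  Any
 * trailing newline from echo is stripped before the lookup, and the
 * write completes only after the adapter reset that activates the new
 * image has finished.
 */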
4130
Wayne Boyer75576bb2010-07-14 10:50:14 -07004131/**
4132 * ipr_show_fw_type - Show the adapter's firmware type.
4133 * @dev: class device struct
4134 * @buf: buffer
4135 *
4136 * Return value:
4137 * number of bytes printed to buffer
4138 **/
4139static ssize_t ipr_show_fw_type(struct device *dev,
4140 struct device_attribute *attr, char *buf)
4141{
4142 struct Scsi_Host *shost = class_to_shost(dev);
4143 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4144 unsigned long lock_flags = 0;
4145 int len;
4146
4147 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4148 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4150 return len;
4151}
4152
4153static struct device_attribute ipr_ioa_fw_type_attr = {
4154 .attr = {
4155 .name = "fw_type",
4156 .mode = S_IRUGO,
4157 },
4158 .show = ipr_show_fw_type
4159};
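
/*
 * Note (illustrative, host number assumed): reading fw_type reports
 * ioa_cfg->sis64, so "cat /sys/class/scsi_host/host0/fw_type" prints
 * 1 on SIS-64 capable adapters and 0 otherwise.
 */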
4160
Brian Kingafc3f832016-08-24 12:56:51 -05004161static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4162 struct bin_attribute *bin_attr, char *buf,
4163 loff_t off, size_t count)
4164{
4165 struct device *cdev = container_of(kobj, struct device, kobj);
4166 struct Scsi_Host *shost = class_to_shost(cdev);
4167 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4168 struct ipr_hostrcb *hostrcb;
4169 unsigned long lock_flags = 0;
4170 int ret;
4171
4172 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4173 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4174 struct ipr_hostrcb, queue);
4175 if (!hostrcb) {
4176 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4177 return 0;
4178 }
4179 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4180 sizeof(hostrcb->hcam));
4181 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4182 return ret;
4183}
4184
4185static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4186 struct bin_attribute *bin_attr, char *buf,
4187 loff_t off, size_t count)
4188{
4189 struct device *cdev = container_of(kobj, struct device, kobj);
4190 struct Scsi_Host *shost = class_to_shost(cdev);
4191 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4192 struct ipr_hostrcb *hostrcb;
4193 unsigned long lock_flags = 0;
4194
4195 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4196 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4197 struct ipr_hostrcb, queue);
4198 if (!hostrcb) {
4199 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4200 return count;
4201 }
4202
4203 /* Reclaim hostrcb before exit */
4204 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4205 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4206 return count;
4207}
4208
4209static struct bin_attribute ipr_ioa_async_err_log = {
4210 .attr = {
4211 .name = "async_err_log",
4212 .mode = S_IRUGO | S_IWUSR,
4213 },
4214 .size = 0,
4215 .read = ipr_read_async_err_log,
4216 .write = ipr_next_async_err_log
4217};
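
/*
 * Usage sketch for async_err_log (illustrative only; the host number
 * is an assumption):
 *
 *	# cat /sys/class/scsi_host/host0/async_err_log     (read oldest HCAM)
 *	# echo > /sys/class/scsi_host/host0/async_err_log  (discard it)
 *
 * A read returns the hostrcb at the head of hostrcb_report_q without
 * consuming it; any write moves that entry back to the free queue so
 * the next read returns the next logged error, if one exists.
 */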
4218
Tony Jonesee959b02008-02-22 00:13:36 +01004219static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220 &ipr_fw_version_attr,
4221 &ipr_log_level_attr,
4222 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06004223 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 &ipr_ioa_reset_attr,
4225 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004226 &ipr_ioa_fw_type_attr,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06004227 &ipr_iopoll_weight_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 NULL,
4229};
4230
4231#ifdef CONFIG_SCSI_IPR_DUMP
4232/**
4233 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004234 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004236 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 * @buf: buffer
4238 * @off: offset
4239 * @count: buffer size
4240 *
4241 * Return value:
 * number of bytes read from the dump / -EACCES if not permitted
4243 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004244static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004245 struct bin_attribute *bin_attr,
4246 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247{
Tony Jonesee959b02008-02-22 00:13:36 +01004248 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 struct Scsi_Host *shost = class_to_shost(cdev);
4250 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4251 struct ipr_dump *dump;
4252 unsigned long lock_flags = 0;
4253 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004254 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255 size_t rc = count;
4256
4257 if (!capable(CAP_SYS_ADMIN))
4258 return -EACCES;
4259
4260 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4261 dump = ioa_cfg->dump;
4262
4263 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4264 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4265 return 0;
4266 }
4267 kref_get(&dump->kref);
4268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4269
4270 if (off > dump->driver_dump.hdr.len) {
4271 kref_put(&dump->kref, ipr_release_dump);
4272 return 0;
4273 }
4274
4275 if (off + count > dump->driver_dump.hdr.len) {
4276 count = dump->driver_dump.hdr.len - off;
4277 rc = count;
4278 }
4279
4280 if (count && off < sizeof(dump->driver_dump)) {
4281 if (off + count > sizeof(dump->driver_dump))
4282 len = sizeof(dump->driver_dump) - off;
4283 else
4284 len = count;
4285 src = (u8 *)&dump->driver_dump + off;
4286 memcpy(buf, src, len);
4287 buf += len;
4288 off += len;
4289 count -= len;
4290 }
4291
4292 off -= sizeof(dump->driver_dump);
4293
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004294 if (ioa_cfg->sis64)
4295 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4296 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4297 sizeof(struct ipr_sdt_entry));
4298 else
4299 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4300 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4301
4302 if (count && off < sdt_end) {
4303 if (off + count > sdt_end)
4304 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305 else
4306 len = count;
4307 src = (u8 *)&dump->ioa_dump + off;
4308 memcpy(buf, src, len);
4309 buf += len;
4310 off += len;
4311 count -= len;
4312 }
4313
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004314 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315
4316 while (count) {
4317 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4318 len = PAGE_ALIGN(off) - off;
4319 else
4320 len = count;
4321 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4322 src += off & ~PAGE_MASK;
4323 memcpy(buf, src, len);
4324 buf += len;
4325 off += len;
4326 count -= len;
4327 }
4328
4329 kref_put(&dump->kref, ipr_release_dump);
4330 return rc;
4331}
4332
4333/**
4334 * ipr_alloc_dump - Prepare for adapter dump
4335 * @ioa_cfg: ioa config struct
4336 *
4337 * Return value:
4338 * 0 on success / other on failure
4339 **/
4340static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4341{
4342 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004343 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004344 unsigned long lock_flags = 0;
4345
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004346 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347
4348 if (!dump) {
4349 ipr_err("Dump memory allocation failed\n");
4350 return -ENOMEM;
4351 }
4352
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004353 if (ioa_cfg->sis64)
4354 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4355 else
4356 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4357
4358 if (!ioa_data) {
4359 ipr_err("Dump memory allocation failed\n");
4360 kfree(dump);
4361 return -ENOMEM;
4362 }
4363
4364 dump->ioa_dump.ioa_data = ioa_data;
4365
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366 kref_init(&dump->kref);
4367 dump->ioa_cfg = ioa_cfg;
4368
4369 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4370
4371 if (INACTIVE != ioa_cfg->sdt_state) {
4372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004373 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374 kfree(dump);
4375 return 0;
4376 }
4377
4378 ioa_cfg->dump = dump;
4379 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004380 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381 ioa_cfg->dump_taken = 1;
4382 schedule_work(&ioa_cfg->work_q);
4383 }
4384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4385
Linus Torvalds1da177e2005-04-16 15:20:36 -07004386 return 0;
4387}
4388
4389/**
4390 * ipr_free_dump - Free adapter dump memory
4391 * @ioa_cfg: ioa config struct
4392 *
4393 * Return value:
4394 * 0 on success / other on failure
4395 **/
4396static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4397{
4398 struct ipr_dump *dump;
4399 unsigned long lock_flags = 0;
4400
4401 ENTER;
4402
4403 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4404 dump = ioa_cfg->dump;
4405 if (!dump) {
4406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4407 return 0;
4408 }
4409
4410 ioa_cfg->dump = NULL;
4411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4412
4413 kref_put(&dump->kref, ipr_release_dump);
4414
4415 LEAVE;
4416 return 0;
4417}
4418
4419/**
4420 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004421 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004423 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424 * @buf: buffer
4425 * @off: offset
4426 * @count: buffer size
4427 *
4428 * Return value:
 * count on success / other on failure
4430 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004431static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004432 struct bin_attribute *bin_attr,
4433 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434{
Tony Jonesee959b02008-02-22 00:13:36 +01004435 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436 struct Scsi_Host *shost = class_to_shost(cdev);
4437 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4438 int rc;
4439
4440 if (!capable(CAP_SYS_ADMIN))
4441 return -EACCES;
4442
4443 if (buf[0] == '1')
4444 rc = ipr_alloc_dump(ioa_cfg);
4445 else if (buf[0] == '0')
4446 rc = ipr_free_dump(ioa_cfg);
4447 else
4448 return -EINVAL;
4449
4450 if (rc)
4451 return rc;
4452 else
4453 return count;
4454}
4455
4456static struct bin_attribute ipr_dump_attr = {
4457 .attr = {
4458 .name = "dump",
4459 .mode = S_IRUSR | S_IWUSR,
4460 },
4461 .size = 0,
4462 .read = ipr_read_dump,
4463 .write = ipr_write_dump
4464};
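
/*
 * Usage sketch for the dump attribute (illustrative only; the host
 * number is an assumption):
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump    (allocate dump memory)
 *	# cat /sys/class/scsi_host/host0/dump > ipr.dump
 *	# echo 0 > /sys/class/scsi_host/host0/dump    (free dump memory)
 *
 * Reads return data only once sdt_state reaches DUMP_OBTAINED; until
 * then ipr_read_dump() returns 0 bytes.
 */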
4465#else
4466static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4467#endif
4468
4469/**
4470 * ipr_change_queue_depth - Change the device's queue depth
4471 * @sdev: scsi device struct
4472 * @qdepth: depth to set
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474 *
4475 * Return value:
4476 * actual depth set
4477 **/
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004478static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479{
Brian King35a39692006-09-25 12:39:20 -05004480 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4481 struct ipr_resource_entry *res;
4482 unsigned long lock_flags = 0;
4483
4484 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4485 res = (struct ipr_resource_entry *)sdev->hostdata;
4486
4487 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4488 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4490
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004491 scsi_change_queue_depth(sdev, qdepth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492 return sdev->queue_depth;
4493}
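
/*
 * Illustration (device name assumed): the midlayer calls the hook
 * above when the standard queue_depth attribute is written, e.g.
 *
 *	# echo 16 > /sys/block/sda/device/queue_depth
 *
 * For SATA (GATA) resources the requested depth is capped at
 * IPR_MAX_CMD_PER_ATA_LUN before scsi_change_queue_depth() is called.
 */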
4494
4495/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4497 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004498 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499 * @buf: buffer
4500 *
4501 * Return value:
4502 * number of bytes printed to buffer
4503 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004504static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505{
4506 struct scsi_device *sdev = to_scsi_device(dev);
4507 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4508 struct ipr_resource_entry *res;
4509 unsigned long lock_flags = 0;
4510 ssize_t len = -ENXIO;
4511
4512 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4513 res = (struct ipr_resource_entry *)sdev->hostdata;
4514 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004515 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4517 return len;
4518}
4519
4520static struct device_attribute ipr_adapter_handle_attr = {
4521 .attr = {
4522 .name = "adapter_handle",
4523 .mode = S_IRUSR,
4524 },
4525 .show = ipr_show_adapter_handle
4526};
4527
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004528/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004529 * ipr_show_resource_path - Show the resource path or the resource address for
4530 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004531 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004532 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004533 * @buf: buffer
4534 *
4535 * Return value:
4536 * number of bytes printed to buffer
4537 **/
4538static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4539{
4540 struct scsi_device *sdev = to_scsi_device(dev);
4541 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4542 struct ipr_resource_entry *res;
4543 unsigned long lock_flags = 0;
4544 ssize_t len = -ENXIO;
4545 char buffer[IPR_MAX_RES_PATH_LENGTH];
4546
4547 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4548 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004549 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004550 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004551 __ipr_format_res_path(res->res_path, buffer,
4552 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004553 else if (res)
4554 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4555 res->bus, res->target, res->lun);
4556
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004557 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4558 return len;
4559}
4560
4561static struct device_attribute ipr_resource_path_attr = {
4562 .attr = {
4563 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004564 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004565 },
4566 .show = ipr_show_resource_path
4567};
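
/*
 * Example (illustrative): on SIS-64 adapters reading resource_path
 * returns the path string built by __ipr_format_res_path(); on older
 * adapters it falls back to the host:bus:target:lun tuple, e.g.
 * "2:0:4:0" (values assumed).
 */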
4568
Wayne Boyer75576bb2010-07-14 10:50:14 -07004569/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004570 * ipr_show_device_id - Show the device_id for this device.
4571 * @dev: device struct
4572 * @attr: device attribute structure
4573 * @buf: buffer
4574 *
4575 * Return value:
4576 * number of bytes printed to buffer
4577 **/
4578static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4579{
4580 struct scsi_device *sdev = to_scsi_device(dev);
4581 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4582 struct ipr_resource_entry *res;
4583 unsigned long lock_flags = 0;
4584 ssize_t len = -ENXIO;
4585
4586 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4587 res = (struct ipr_resource_entry *)sdev->hostdata;
4588 if (res && ioa_cfg->sis64)
Wen Xiongbb8647e2015-06-11 20:45:18 -05004589 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
Wayne Boyer46d74562010-08-11 07:15:17 -07004590 else if (res)
4591 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4592
4593 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4594 return len;
4595}
4596
4597static struct device_attribute ipr_device_id_attr = {
4598 .attr = {
4599 .name = "device_id",
4600 .mode = S_IRUGO,
4601 },
4602 .show = ipr_show_device_id
4603};
4604
4605/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004606 * ipr_show_resource_type - Show the resource type for this device.
4607 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004608 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004609 * @buf: buffer
4610 *
4611 * Return value:
4612 * number of bytes printed to buffer
4613 **/
4614static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4615{
4616 struct scsi_device *sdev = to_scsi_device(dev);
4617 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4618 struct ipr_resource_entry *res;
4619 unsigned long lock_flags = 0;
4620 ssize_t len = -ENXIO;
4621
4622 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4623 res = (struct ipr_resource_entry *)sdev->hostdata;
4624
4625 if (res)
4626 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4627
4628 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4629 return len;
4630}
4631
4632static struct device_attribute ipr_resource_type_attr = {
4633 .attr = {
4634 .name = "resource_type",
4635 .mode = S_IRUGO,
4636 },
4637 .show = ipr_show_resource_type
4638};
4639
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004640/**
 * ipr_show_raw_mode - Show the device's raw mode
4642 * @dev: class device struct
4643 * @buf: buffer
4644 *
4645 * Return value:
4646 * number of bytes printed to buffer
4647 **/
4648static ssize_t ipr_show_raw_mode(struct device *dev,
4649 struct device_attribute *attr, char *buf)
4650{
4651 struct scsi_device *sdev = to_scsi_device(dev);
4652 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4653 struct ipr_resource_entry *res;
4654 unsigned long lock_flags = 0;
4655 ssize_t len;
4656
4657 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4658 res = (struct ipr_resource_entry *)sdev->hostdata;
4659 if (res)
4660 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4661 else
4662 len = -ENXIO;
4663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4664 return len;
4665}
4666
4667/**
 * ipr_store_raw_mode - Change the device's raw mode
4669 * @dev: class device struct
4670 * @buf: buffer
4671 *
4672 * Return value:
 * length of buffer consumed on success / other on failure
4674 **/
4675static ssize_t ipr_store_raw_mode(struct device *dev,
4676 struct device_attribute *attr,
4677 const char *buf, size_t count)
4678{
4679 struct scsi_device *sdev = to_scsi_device(dev);
4680 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4681 struct ipr_resource_entry *res;
4682 unsigned long lock_flags = 0;
4683 ssize_t len;
4684
4685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4686 res = (struct ipr_resource_entry *)sdev->hostdata;
4687 if (res) {
Gabriel Krisman Bertazie35d7f272015-08-19 11:47:06 -03004688 if (ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004689 res->raw_mode = simple_strtoul(buf, NULL, 10);
4690 len = strlen(buf);
4691 if (res->sdev)
4692 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4693 res->raw_mode ? "enabled" : "disabled");
4694 } else
4695 len = -EINVAL;
4696 } else
4697 len = -ENXIO;
4698 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4699 return len;
4700}
4701
4702static struct device_attribute ipr_raw_mode_attr = {
4703 .attr = {
4704 .name = "raw_mode",
4705 .mode = S_IRUGO | S_IWUSR,
4706 },
4707 .show = ipr_show_raw_mode,
4708 .store = ipr_store_raw_mode
4709};
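
/*
 * Usage sketch (illustrative; the H:C:T:L address is an assumption):
 *
 *	# echo 1 > /sys/bus/scsi/devices/2:0:4:0/raw_mode
 *
 * The store handler accepts the setting only for AF DASD resources;
 * any other device type gets -EINVAL, and -ENXIO is returned if the
 * resource entry is unknown to the driver.
 */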
4710
Linus Torvalds1da177e2005-04-16 15:20:36 -07004711static struct device_attribute *ipr_dev_attrs[] = {
4712 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004713 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004714 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004715 &ipr_resource_type_attr,
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004716 &ipr_raw_mode_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717 NULL,
4718};
4719
4720/**
4721 * ipr_biosparam - Return the HSC mapping
4722 * @sdev: scsi device struct
4723 * @block_device: block device pointer
4724 * @capacity: capacity of the device
4725 * @parm: Array containing returned HSC values.
4726 *
4727 * This function generates the HSC parms that fdisk uses.
4728 * We want to make sure we return something that places partitions
4729 * on 4k boundaries for best performance with the IOA.
4730 *
4731 * Return value:
4732 * 0 on success
4733 **/
4734static int ipr_biosparam(struct scsi_device *sdev,
4735 struct block_device *block_device,
4736 sector_t capacity, int *parm)
4737{
4738 int heads, sectors;
4739 sector_t cylinders;
4740
4741 heads = 128;
4742 sectors = 32;
4743
4744 cylinders = capacity;
4745 sector_div(cylinders, (128 * 32));
4746
4747 /* return result */
4748 parm[0] = heads;
4749 parm[1] = sectors;
4750 parm[2] = cylinders;
4751
4752 return 0;
4753}
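
/*
 * Worked example (values assumed): with the fixed 128 head / 32 sector
 * geometry each cylinder spans 128 * 32 = 4096 sectors (2 MiB with
 * 512-byte sectors), so a 4,194,304 sector (2 GiB) volume reports
 * parm[] = { 128, 32, 1024 }.  Partitions that start on a cylinder
 * boundary are therefore 4k aligned, as the comment above intends.
 */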
4754
4755/**
Brian King35a39692006-09-25 12:39:20 -05004756 * ipr_find_starget - Find target based on bus/target.
4757 * @starget: scsi target struct
4758 *
4759 * Return value:
4760 * resource entry pointer if found / NULL if not found
4761 **/
4762static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4763{
4764 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4765 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4766 struct ipr_resource_entry *res;
4767
4768 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004769 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004770 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004771 return res;
4772 }
4773 }
4774
4775 return NULL;
4776}
4777
4778static struct ata_port_info sata_port_info;
4779
4780/**
4781 * ipr_target_alloc - Prepare for commands to a SCSI target
4782 * @starget: scsi target struct
4783 *
4784 * If the device is a SATA device, this function allocates an
4785 * ATA port with libata, else it does nothing.
4786 *
4787 * Return value:
4788 * 0 on success / non-0 on failure
4789 **/
4790static int ipr_target_alloc(struct scsi_target *starget)
4791{
4792 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4793 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4794 struct ipr_sata_port *sata_port;
4795 struct ata_port *ap;
4796 struct ipr_resource_entry *res;
4797 unsigned long lock_flags;
4798
4799 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4800 res = ipr_find_starget(starget);
4801 starget->hostdata = NULL;
4802
4803 if (res && ipr_is_gata(res)) {
4804 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4805 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4806 if (!sata_port)
4807 return -ENOMEM;
4808
4809 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4810 if (ap) {
4811 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4812 sata_port->ioa_cfg = ioa_cfg;
4813 sata_port->ap = ap;
4814 sata_port->res = res;
4815
4816 res->sata_port = sata_port;
4817 ap->private_data = sata_port;
4818 starget->hostdata = sata_port;
4819 } else {
4820 kfree(sata_port);
4821 return -ENOMEM;
4822 }
4823 }
4824 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4825
4826 return 0;
4827}
4828
4829/**
4830 * ipr_target_destroy - Destroy a SCSI target
4831 * @starget: scsi target struct
4832 *
4833 * If the device was a SATA device, this function frees the libata
4834 * ATA port, else it does nothing.
4835 *
4836 **/
4837static void ipr_target_destroy(struct scsi_target *starget)
4838{
4839 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004840 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4841 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4842
4843 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004844 if (!ipr_find_starget(starget)) {
4845 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4846 clear_bit(starget->id, ioa_cfg->array_ids);
4847 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4848 clear_bit(starget->id, ioa_cfg->vset_ids);
4849 else if (starget->channel == 0)
4850 clear_bit(starget->id, ioa_cfg->target_ids);
4851 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004852 }
Brian King35a39692006-09-25 12:39:20 -05004853
4854 if (sata_port) {
4855 starget->hostdata = NULL;
4856 ata_sas_port_destroy(sata_port->ap);
4857 kfree(sata_port);
4858 }
4859}
4860
4861/**
4862 * ipr_find_sdev - Find device based on bus/target/lun.
4863 * @sdev: scsi device struct
4864 *
4865 * Return value:
4866 * resource entry pointer if found / NULL if not found
4867 **/
4868static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4869{
4870 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4871 struct ipr_resource_entry *res;
4872
4873 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004874 if ((res->bus == sdev->channel) &&
4875 (res->target == sdev->id) &&
4876 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004877 return res;
4878 }
4879
4880 return NULL;
4881}
4882
4883/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 * ipr_slave_destroy - Unconfigure a SCSI device
4885 * @sdev: scsi device struct
4886 *
4887 * Return value:
4888 * nothing
4889 **/
4890static void ipr_slave_destroy(struct scsi_device *sdev)
4891{
4892 struct ipr_resource_entry *res;
4893 struct ipr_ioa_cfg *ioa_cfg;
4894 unsigned long lock_flags = 0;
4895
4896 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4897
4898 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4899 res = (struct ipr_resource_entry *) sdev->hostdata;
4900 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004901 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004902 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903 sdev->hostdata = NULL;
4904 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004905 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906 }
4907 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4908}
4909
4910/**
4911 * ipr_slave_configure - Configure a SCSI device
4912 * @sdev: scsi device struct
4913 *
4914 * This function configures the specified scsi device.
4915 *
4916 * Return value:
4917 * 0 on success
4918 **/
4919static int ipr_slave_configure(struct scsi_device *sdev)
4920{
4921 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4922 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004923 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004925 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004926
4927 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4928 res = sdev->hostdata;
4929 if (res) {
4930 if (ipr_is_af_dasd_device(res))
4931 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004932 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004934 sdev->no_uld_attach = 1;
4935 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936 if (ipr_is_vset_device(res)) {
Brian King60654e22014-12-02 12:47:46 -06004937 sdev->scsi_level = SCSI_SPC_3;
Jens Axboe242f9dc2008-09-14 05:55:09 -07004938 blk_queue_rq_timeout(sdev->request_queue,
4939 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004940 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004941 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004942 if (ipr_is_gata(res) && res->sata_port)
4943 ap = res->sata_port->ap;
4944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4945
4946 if (ap) {
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004947 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004948 ata_sas_slave_configure(sdev, ap);
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01004949 }
4950
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004951 if (ioa_cfg->sis64)
4952 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004953 ipr_format_res_path(ioa_cfg,
4954 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004955 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956 }
4957 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4958 return 0;
4959}
4960
4961/**
Brian King35a39692006-09-25 12:39:20 -05004962 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4963 * @sdev: scsi device struct
4964 *
4965 * This function initializes an ATA port so that future commands
4966 * sent through queuecommand will work.
4967 *
4968 * Return value:
4969 * 0 on success
4970 **/
4971static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4972{
4973 struct ipr_sata_port *sata_port = NULL;
4974 int rc = -ENXIO;
4975
4976 ENTER;
4977 if (sdev->sdev_target)
4978 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07004979 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05004980 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07004981 if (rc == 0)
4982 rc = ata_sas_sync_probe(sata_port->ap);
4983 }
4984
Brian King35a39692006-09-25 12:39:20 -05004985 if (rc)
4986 ipr_slave_destroy(sdev);
4987
4988 LEAVE;
4989 return rc;
4990}
4991
4992/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993 * ipr_slave_alloc - Prepare for commands to a device.
4994 * @sdev: scsi device struct
4995 *
4996 * This function saves a pointer to the resource entry
4997 * in the scsi device struct if the device exists. We
4998 * can then use this pointer in ipr_queuecommand when
4999 * handling new commands.
5000 *
5001 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06005002 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 **/
5004static int ipr_slave_alloc(struct scsi_device *sdev)
5005{
5006 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5007 struct ipr_resource_entry *res;
5008 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06005009 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010
5011 sdev->hostdata = NULL;
5012
5013 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5014
Brian King35a39692006-09-25 12:39:20 -05005015 res = ipr_find_sdev(sdev);
5016 if (res) {
5017 res->sdev = sdev;
5018 res->add_to_ml = 0;
5019 res->in_erp = 0;
5020 sdev->hostdata = res;
5021 if (!ipr_is_naca_model(res))
5022 res->needs_sync_complete = 1;
5023 rc = 0;
5024 if (ipr_is_gata(res)) {
5025 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5026 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005027 }
5028 }
5029
5030 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5031
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06005032 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033}
5034
Brian King6cdb0812014-10-30 17:27:10 -05005035/**
5036 * ipr_match_lun - Match function for specified LUN
5037 * @ipr_cmd: ipr command struct
5038 * @device: device to match (sdev)
5039 *
5040 * Returns:
5041 * 1 if command matches sdev / 0 if command does not match sdev
5042 **/
5043static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5044{
5045 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5046 return 1;
5047 return 0;
5048}
5049
5050/**
Brian King439ae282017-03-15 16:58:39 -05005051 * ipr_cmnd_is_free - Check if a command is free or not
5052 * @ipr_cmd ipr command struct
5053 *
5054 * Returns:
5055 * true / false
5056 **/
5057static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5058{
5059 struct ipr_cmnd *loop_cmd;
5060
5061 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5062 if (loop_cmd == ipr_cmd)
5063 return true;
5064 }
5065
5066 return false;
5067}
5068
5069/**
Brian Kingef97d8a2017-03-15 16:58:41 -05005070 * ipr_match_res - Match function for specified resource entry
5071 * @ipr_cmd: ipr command struct
5072 * @resource: resource entry to match
5073 *
5074 * Returns:
 * 1 if command matches the resource entry / 0 if it does not
5076 **/
5077static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5078{
5079 struct ipr_resource_entry *res = resource;
5080
5081 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5082 return 1;
5083 return 0;
5084}
5085
5086/**
Brian King6cdb0812014-10-30 17:27:10 -05005087 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg: ioa config struct
5089 * @device: device to match (sdev)
5090 * @match: match function to use
5091 *
5092 * Returns:
5093 * SUCCESS / FAILED
5094 **/
5095static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5096 int (*match)(struct ipr_cmnd *, void *))
5097{
5098 struct ipr_cmnd *ipr_cmd;
Brian King439ae282017-03-15 16:58:39 -05005099 int wait, i;
Brian King6cdb0812014-10-30 17:27:10 -05005100 unsigned long flags;
5101 struct ipr_hrr_queue *hrrq;
5102 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5103 DECLARE_COMPLETION_ONSTACK(comp);
5104
5105 ENTER;
5106 do {
5107 wait = 0;
5108
5109 for_each_hrrq(hrrq, ioa_cfg) {
5110 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005111 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5112 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5113 if (!ipr_cmnd_is_free(ipr_cmd)) {
5114 if (match(ipr_cmd, device)) {
5115 ipr_cmd->eh_comp = &comp;
5116 wait++;
5117 }
Brian King6cdb0812014-10-30 17:27:10 -05005118 }
5119 }
5120 spin_unlock_irqrestore(hrrq->lock, flags);
5121 }
5122
5123 if (wait) {
5124 timeout = wait_for_completion_timeout(&comp, timeout);
5125
5126 if (!timeout) {
5127 wait = 0;
5128
5129 for_each_hrrq(hrrq, ioa_cfg) {
5130 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005131 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5132 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5133 if (!ipr_cmnd_is_free(ipr_cmd)) {
5134 if (match(ipr_cmd, device)) {
5135 ipr_cmd->eh_comp = NULL;
5136 wait++;
5137 }
Brian King6cdb0812014-10-30 17:27:10 -05005138 }
5139 }
5140 spin_unlock_irqrestore(hrrq->lock, flags);
5141 }
5142
5143 if (wait)
5144 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5145 LEAVE;
5146 return wait ? FAILED : SUCCESS;
5147 }
5148 }
5149 } while (wait);
5150
5151 LEAVE;
5152 return SUCCESS;
5153}
5154
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005155static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156{
5157 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005158 unsigned long lock_flags = 0;
5159 int rc = SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160
5161 ENTER;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005162 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5163 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05005165 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005166 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005167 dev_err(&ioa_cfg->pdev->dev,
5168 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005170 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5171 ioa_cfg->sdt_state = GET_DUMP;
5172 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005174 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5175 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5176 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177
	/* If a host reset arrived while the adapter was already being reset
	   and that reset failed, report the host reset as failed as well. */
5180 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5181 ipr_trace;
5182 rc = FAILED;
5183 }
5184
5185 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186 LEAVE;
5187 return rc;
5188}
5189
5190/**
Brian Kingc6513092006-03-29 09:37:43 -06005191 * ipr_device_reset - Reset the device
5192 * @ioa_cfg: ioa config struct
5193 * @res: resource entry struct
5194 *
5195 * This function issues a device reset to the affected device.
5196 * If the device is a SCSI device, a LUN reset will be sent
5197 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05005198 * will be sent. If the device is a SATA device, a PHY reset will
5199 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06005200 *
5201 * Return value:
5202 * 0 on success / non-zero on failure
5203 **/
5204static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5205 struct ipr_resource_entry *res)
5206{
5207 struct ipr_cmnd *ipr_cmd;
5208 struct ipr_ioarcb *ioarcb;
5209 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05005210 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06005211 u32 ioasc;
5212
5213 ENTER;
5214 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5215 ioarcb = &ipr_cmd->ioarcb;
5216 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08005217
5218 if (ipr_cmd->ioa_cfg->sis64) {
5219 regs = &ipr_cmd->i.ata_ioadl.regs;
5220 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5221 } else
5222 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06005223
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005224 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06005225 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5226 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05005227 if (ipr_is_gata(res)) {
5228 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08005229 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05005230 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5231 }
Brian Kingc6513092006-03-29 09:37:43 -06005232
5233 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005234 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005235 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005236 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5237 if (ipr_cmd->ioa_cfg->sis64)
5238 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5239 sizeof(struct ipr_ioasa_gata));
5240 else
5241 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5242 sizeof(struct ipr_ioasa_gata));
5243 }
Brian Kingc6513092006-03-29 09:37:43 -06005244
5245 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005246 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06005247}
5248
5249/**
Brian King35a39692006-09-25 12:39:20 -05005250 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09005251 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05005252 * @classes: class of the attached device
5253 *
Tejun Heocc0680a2007-08-06 18:36:23 +09005254 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05005255 *
5256 * Return value:
5257 * 0 on success / non-zero on failure
5258 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09005259static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07005260 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05005261{
Tejun Heocc0680a2007-08-06 18:36:23 +09005262 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05005263 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5264 struct ipr_resource_entry *res;
5265 unsigned long lock_flags = 0;
Brian Kingef97d8a2017-03-15 16:58:41 -05005266 int rc = -ENXIO, ret;
Brian King35a39692006-09-25 12:39:20 -05005267
5268 ENTER;
5269 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005270 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06005271 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5272 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5273 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5274 }
5275
Brian King35a39692006-09-25 12:39:20 -05005276 res = sata_port->res;
5277 if (res) {
5278 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005279 *classes = res->ata_class;
Brian Kingef97d8a2017-03-15 16:58:41 -05005280 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King35a39692006-09-25 12:39:20 -05005281
Brian Kingef97d8a2017-03-15 16:58:41 -05005282 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5283 if (ret != SUCCESS) {
5284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5285 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5287
5288 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5289 }
5290 } else
5291 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5292
Brian King35a39692006-09-25 12:39:20 -05005293 LEAVE;
5294 return rc;
5295}
5296
5297/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298 * ipr_eh_dev_reset - Reset the device
5299 * @scsi_cmd: scsi command struct
5300 *
5301 * This function issues a device reset to the affected device.
5302 * A LUN reset will be sent to the device first. If that does
5303 * not work, a target reset will be sent.
5304 *
5305 * Return value:
5306 * SUCCESS / FAILED
5307 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005308static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309{
5310 struct ipr_cmnd *ipr_cmd;
5311 struct ipr_ioa_cfg *ioa_cfg;
5312 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05005313 struct ata_port *ap;
Brian King439ae282017-03-15 16:58:39 -05005314 int rc = 0, i;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005315 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316
5317 ENTER;
5318 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5319 res = scsi_cmd->device->hostdata;
5320
Linus Torvalds1da177e2005-04-16 15:20:36 -07005321 /*
5322 * If we are currently going through reset/reload, return failed. This will force the
5323 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5324 * reset to complete
5325 */
5326 if (ioa_cfg->in_reset_reload)
5327 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005328 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329 return FAILED;
5330
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005331 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005332 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005333 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5334 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5335
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005336 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
Brian King960e9642017-03-15 16:58:37 -05005337 if (!ipr_cmd->qc)
5338 continue;
Brian King439ae282017-03-15 16:58:39 -05005339 if (ipr_cmnd_is_free(ipr_cmd))
5340 continue;
Brian King960e9642017-03-15 16:58:37 -05005341
5342 ipr_cmd->done = ipr_sata_eh_done;
5343 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005344 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5345 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5346 }
Brian King7402ece2006-11-21 10:28:23 -06005347 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005348 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005349 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005350 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005351 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005352 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05005353
5354 if (ipr_is_gata(res) && res->sata_port) {
5355 ap = res->sata_port->ap;
5356 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09005357 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05005358 spin_lock_irq(scsi_cmd->device->host->host_lock);
5359 } else
5360 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06005362 res->reset_occurred = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005363
Linus Torvalds1da177e2005-04-16 15:20:36 -07005364 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005365 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005366}
5367
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005368static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005369{
5370 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005371 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingef97d8a2017-03-15 16:58:41 -05005372 struct ipr_resource_entry *res;
Brian King6cdb0812014-10-30 17:27:10 -05005373
5374 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
Brian Kingef97d8a2017-03-15 16:58:41 -05005375 res = cmd->device->hostdata;
5376
5377 if (!res)
5378 return FAILED;
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005379
5380 spin_lock_irq(cmd->device->host->host_lock);
5381 rc = __ipr_eh_dev_reset(cmd);
5382 spin_unlock_irq(cmd->device->host->host_lock);
5383
Brian Kingef97d8a2017-03-15 16:58:41 -05005384 if (rc == SUCCESS) {
5385 if (ipr_is_gata(res) && res->sata_port)
5386 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5387 else
5388 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5389 }
Brian King6cdb0812014-10-30 17:27:10 -05005390
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005391 return rc;
5392}
5393
Linus Torvalds1da177e2005-04-16 15:20:36 -07005394/**
5395 * ipr_bus_reset_done - Op done function for bus reset.
5396 * @ipr_cmd: ipr command struct
5397 *
5398 * This function is the op done function for a bus reset
5399 *
5400 * Return value:
5401 * none
5402 **/
5403static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5404{
5405 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5406 struct ipr_resource_entry *res;
5407
5408 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005409 if (!ioa_cfg->sis64)
5410 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5411 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5412 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5413 break;
5414 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416
5417 /*
5418 * If abort has not completed, indicate the reset has, else call the
5419 * abort's done function to wake the sleeping eh thread
5420 */
5421 if (ipr_cmd->sibling->sibling)
5422 ipr_cmd->sibling->sibling = NULL;
5423 else
5424 ipr_cmd->sibling->done(ipr_cmd->sibling);
5425
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005426 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005427 LEAVE;
5428}
5429
5430/**
5431 * ipr_abort_timeout - An abort task has timed out
5432 * @ipr_cmd: ipr command struct
5433 *
5434 * This function handles when an abort task times out. If this
5435 * happens we issue a bus reset since we have resources tied
5436 * up that must be freed before returning to the midlayer.
5437 *
5438 * Return value:
5439 * none
5440 **/
5441static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5442{
5443 struct ipr_cmnd *reset_cmd;
5444 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5445 struct ipr_cmd_pkt *cmd_pkt;
5446 unsigned long lock_flags = 0;
5447
5448 ENTER;
5449 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5450 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5451 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5452 return;
5453 }
5454
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005455 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5457 ipr_cmd->sibling = reset_cmd;
5458 reset_cmd->sibling = ipr_cmd;
5459 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5460 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5461 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5462 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5463 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5464
5465 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5466 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5467 LEAVE;
5468}
5469
5470/**
5471 * ipr_cancel_op - Cancel specified op
5472 * @scsi_cmd: scsi command struct
5473 *
5474 * This function cancels the specified op.
5475 *
5476 * Return value:
5477 * SUCCESS / FAILED
5478 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005479static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005480{
5481 struct ipr_cmnd *ipr_cmd;
5482 struct ipr_ioa_cfg *ioa_cfg;
5483 struct ipr_resource_entry *res;
5484 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005485 u32 ioasc, int_reg;
Brian King439ae282017-03-15 16:58:39 -05005486 int i, op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005487 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005488
5489 ENTER;
5490 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5491 res = scsi_cmd->device->hostdata;
5492
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005493 /* If we are currently going through reset/reload, return failed.
5494 * This will force the mid-layer to call ipr_eh_host_reset,
5495 * which will then go to sleep and wait for the reset to complete
5496 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005497 if (ioa_cfg->in_reset_reload ||
5498 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005499 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005500 if (!res)
5501 return FAILED;
5502
5503 /*
5504 * If we are aborting a timed out op, chances are that the timeout was caused
5505 * by a still not detected EEH error. In such cases, reading a register will
5506 * trigger the EEH recovery infrastructure.
5507 */
5508 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5509
5510 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005511 return FAILED;
5512
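	/*
	 * Search every HRR queue for an outstanding command block that still
	 * owns this scsi_cmd; if none is found, the op already completed and
	 * there is nothing left to cancel.
	 */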
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005513 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005514 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005515 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5516 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5517 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5518 op_found = 1;
5519 break;
5520 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005521 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005522 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005523 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005524 }
5525
5526 if (!op_found)
5527 return SUCCESS;
5528
5529 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005530 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005531 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5532 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5533 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5534 ipr_cmd->u.sdev = scsi_cmd->device;
5535
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005536 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5537 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005538 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005539 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005540
5541 /*
5542 * If the abort task timed out and we sent a bus reset, we will get
5543 * one of the following responses to the abort
5544 */
5545 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5546 ioasc = 0;
5547 ipr_trace;
5548 }
5549
Kleber Sacilotto de Souzac4ee22a2013-03-14 13:52:23 -05005550 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005551 if (!ipr_is_naca_model(res))
5552 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005553
5554 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005555 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005556}
5557
5558/**
5559 * ipr_scan_finished - Report whether the device scan is done
5560 * @shost: scsi host struct
 * @elapsed_time: elapsed scan time in jiffies
5561 *
5562 * Return value:
Brian Kingf688f962014-12-02 12:47:37 -06005563 * 0 if scan in progress / 1 if scan is complete
5564 **/
5565static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5566{
5567 unsigned long lock_flags;
5568 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5569 int rc = 0;
5570
5571 spin_lock_irqsave(shost->host_lock, lock_flags);
5572 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5573 rc = 1;
5574 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5575 rc = 1;
5576 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5577 return rc;
5578}
5579
5580/**
5581 * ipr_eh_abort - Abort a single op
5582 * @scsi_cmd: scsi command struct
5583 *
5584 * Return value:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005585 * SUCCESS / FAILED
5586 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005587static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005588{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005589 unsigned long flags;
5590 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005591 struct ipr_ioa_cfg *ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005592
5593 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005594
Brian King6cdb0812014-10-30 17:27:10 -05005595 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5596
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005597 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5598 rc = ipr_cancel_op(scsi_cmd);
5599 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005600
Brian King6cdb0812014-10-30 17:27:10 -05005601 if (rc == SUCCESS)
5602 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005603 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005604 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005605}
5606
5607/**
5608 * ipr_handle_other_interrupt - Handle "other" interrupts
5609 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005610 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005611 *
5612 * Return value:
5613 * IRQ_NONE / IRQ_HANDLED
5614 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005615static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005616 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005617{
5618 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005619 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005620
Wayne Boyer7dacb642011-04-12 10:29:02 -07005621 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5622 int_reg &= ~int_mask_reg;
5623
5624 /* If an interrupt on the adapter did not occur, ignore it.
5625 * Or in the case of SIS 64, check for a stage change interrupt.
5626 */
5627 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5628 if (ioa_cfg->sis64) {
5629 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5630 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5631 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5632
5633 /* clear stage change */
5634 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5635 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5636 list_del(&ioa_cfg->reset_cmd->queue);
5637 del_timer(&ioa_cfg->reset_cmd->timer);
5638 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5639 return IRQ_HANDLED;
5640 }
5641 }
5642
5643 return IRQ_NONE;
5644 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005645
5646 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5647 /* Mask the interrupt */
5648 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005649 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5650
5651 list_del(&ioa_cfg->reset_cmd->queue);
5652 del_timer(&ioa_cfg->reset_cmd->timer);
5653 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005654 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005655 if (ioa_cfg->clear_isr) {
5656 if (ipr_debug && printk_ratelimit())
5657 dev_err(&ioa_cfg->pdev->dev,
5658 "Spurious interrupt detected. 0x%08X\n", int_reg);
5659 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5660 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5661 return IRQ_NONE;
5662 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005663 } else {
5664 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5665 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005666 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5667 dev_err(&ioa_cfg->pdev->dev,
5668 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005669 else
5670 dev_err(&ioa_cfg->pdev->dev,
5671 "Permanent IOA failure. 0x%08X\n", int_reg);
5672
5673 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5674 ioa_cfg->sdt_state = GET_DUMP;
5675
5676 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5677 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5678 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005679
Linus Torvalds1da177e2005-04-16 15:20:36 -07005680 return rc;
5681}
5682
5683/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005684 * ipr_isr_eh - Interrupt service routine error handler
5685 * @ioa_cfg: ioa config struct
5686 * @msg: message to log
 * @number: number logged along with the message
5687 *
5688 * Return value:
5689 * none
5690 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005691static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005692{
5693 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005694 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005695
5696 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5697 ioa_cfg->sdt_state = GET_DUMP;
5698
5699 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5700}
5701
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005702static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005703 struct list_head *doneq)
5704{
5705 u32 ioasc;
5706 u16 cmd_index;
5707 struct ipr_cmnd *ipr_cmd;
5708 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5709 int num_hrrq = 0;
5710
5711 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005712 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005713 return 0;
5714
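	/*
	 * Consume new response queue entries. An entry belongs to the host
	 * while its toggle bit matches the queue's current toggle bit; the
	 * bit is flipped each time the circular queue wraps.
	 */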
5715 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5716 hrr_queue->toggle_bit) {
5717
5718 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5719 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5720 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5721
5722 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5723 cmd_index < hrr_queue->min_cmd_id)) {
5724 ipr_isr_eh(ioa_cfg,
5725 "Invalid response handle from IOA: ",
5726 cmd_index);
5727 break;
5728 }
5729
5730 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5731 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5732
5733 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5734
5735 list_move_tail(&ipr_cmd->queue, doneq);
5736
5737 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5738 hrr_queue->hrrq_curr++;
5739 } else {
5740 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5741 hrr_queue->toggle_bit ^= 1u;
5742 }
5743 num_hrrq++;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005744 if (budget > 0 && num_hrrq >= budget)
5745 break;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005746 }
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005747
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005748 return num_hrrq;
5749}
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005750
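/*
 * irq_poll callback: process up to 'budget' completions from this HRR queue
 * and finish polling once fewer than 'budget' entries were found.
 */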
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005751static int ipr_iopoll(struct irq_poll *iop, int budget)
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005752{
5753 struct ipr_ioa_cfg *ioa_cfg;
5754 struct ipr_hrr_queue *hrrq;
5755 struct ipr_cmnd *ipr_cmd, *temp;
5756 unsigned long hrrq_flags;
5757 int completed_ops;
5758 LIST_HEAD(doneq);
5759
5760 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5761 ioa_cfg = hrrq->ioa_cfg;
5762
5763 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5764 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5765
5766 if (completed_ops < budget)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005767 irq_poll_complete(iop);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005768 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5769
5770 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5771 list_del(&ipr_cmd->queue);
5772 del_timer(&ipr_cmd->timer);
5773 ipr_cmd->fast_done(ipr_cmd);
5774 }
5775
5776 return completed_ops;
5777}
5778
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005779/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780 * ipr_isr - Interrupt service routine
5781 * @irq: irq number
5782 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005783 *
5784 * Return value:
5785 * IRQ_NONE / IRQ_HANDLED
5786 **/
David Howells7d12e782006-10-05 14:55:46 +01005787static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005788{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005789 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5790 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005791 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005792 u32 int_reg = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005793 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005794 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005795 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005796 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005797 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005798
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005799 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005800 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005801 if (!hrrq->allow_interrupts) {
5802 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005803 return IRQ_NONE;
5804 }
5805
Linus Torvalds1da177e2005-04-16 15:20:36 -07005806 while (1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005807 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5808 rc = IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005809
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005810 if (!ioa_cfg->clear_isr)
5811 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005812
Linus Torvalds1da177e2005-04-16 15:20:36 -07005813 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005814 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005815 do {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005816 writel(IPR_PCII_HRRQ_UPDATED,
5817 ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005818 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005819 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005820 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005821
Wayne Boyer7dacb642011-04-12 10:29:02 -07005822 } else if (rc == IRQ_NONE && irq_none == 0) {
5823 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5824 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005825 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5826 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005827 ipr_isr_eh(ioa_cfg,
5828 "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005829 rc = IRQ_HANDLED;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005830 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005831 } else
5832 break;
5833 }
5834
5835 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005836 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005837
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005838 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005839 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5840 list_del(&ipr_cmd->queue);
5841 del_timer(&ipr_cmd->timer);
5842 ipr_cmd->fast_done(ipr_cmd);
5843 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005844 return rc;
5845}
Brian King172cd6e2012-07-17 08:14:40 -05005846
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005847/**
5848 * ipr_isr_mhrrq - Interrupt service routine
5849 * @irq: irq number
5850 * @devp: pointer to ioa config struct
5851 *
5852 * Return value:
5853 * IRQ_NONE / IRQ_HANDLED
5854 **/
5855static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5856{
5857 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005858 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005859 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005860 struct ipr_cmnd *ipr_cmd, *temp;
5861 irqreturn_t rc = IRQ_NONE;
5862 LIST_HEAD(doneq);
5863
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005864 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005865
5866 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005867 if (!hrrq->allow_interrupts) {
5868 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005869 return IRQ_NONE;
5870 }
5871
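	/*
	 * With irq_poll enabled (SIS-64 and multiple interrupt vectors),
	 * defer completion processing to the iopoll handler; otherwise
	 * process the queue inline here.
	 */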
Jens Axboe89f8b332014-03-13 09:38:42 -06005872 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005873 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5874 hrrq->toggle_bit) {
Christoph Hellwigea511902015-12-07 06:41:11 -08005875 irq_poll_sched(&hrrq->iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005876 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5877 return IRQ_HANDLED;
5878 }
5879 } else {
5880 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5881 hrrq->toggle_bit)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005882
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005883 if (ipr_process_hrrq(hrrq, -1, &doneq))
5884 rc = IRQ_HANDLED;
5885 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005886
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005887 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005888
5889 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5890 list_del(&ipr_cmd->queue);
5891 del_timer(&ipr_cmd->timer);
5892 ipr_cmd->fast_done(ipr_cmd);
5893 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005894 return rc;
5895}
5896
5897/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005898 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005899 * @ioa_cfg: ioa config struct
5900 * @ipr_cmd: ipr command struct
5901 *
5902 * Return value:
5903 * 0 on success / -1 on failure
5904 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005905static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5906 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005907{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005908 int i, nseg;
5909 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005910 u32 length;
5911 u32 ioadl_flags = 0;
5912 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5913 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005914 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005915
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005916 length = scsi_bufflen(scsi_cmd);
5917 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005918 return 0;
5919
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005920 nseg = scsi_dma_map(scsi_cmd);
5921 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005922 if (printk_ratelimit())
Anton Blanchardd73341b2014-10-30 17:27:08 -05005923 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005924 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005925 }
5926
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005927 ipr_cmd->dma_use_sg = nseg;
5928
Wayne Boyer438b0332010-05-10 09:13:00 -07005929 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005930 ioarcb->ioadl_len =
5931 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005932
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005933 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5934 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5935 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005936 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5937 ioadl_flags = IPR_IOADL_FLAGS_READ;
5938
5939 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5940 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5941 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5942 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5943 }
5944
5945 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5946 return 0;
5947}
5948
5949/**
5950 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5951 * @ioa_cfg: ioa config struct
5952 * @ipr_cmd: ipr command struct
5953 *
5954 * Return value:
5955 * 0 on success / -1 on failure
5956 **/
5957static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5958 struct ipr_cmnd *ipr_cmd)
5959{
5960 int i, nseg;
5961 struct scatterlist *sg;
5962 u32 length;
5963 u32 ioadl_flags = 0;
5964 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5965 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5966 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5967
5968 length = scsi_bufflen(scsi_cmd);
5969 if (!length)
5970 return 0;
5971
5972 nseg = scsi_dma_map(scsi_cmd);
5973 if (nseg < 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05005974 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
Wayne Boyera32c0552010-02-19 13:23:36 -08005975 return -1;
5976 }
5977
5978 ipr_cmd->dma_use_sg = nseg;
5979
5980 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5981 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5982 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5983 ioarcb->data_transfer_length = cpu_to_be32(length);
5984 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005985 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5986 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5987 ioadl_flags = IPR_IOADL_FLAGS_READ;
5988 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5989 ioarcb->read_ioadl_len =
5990 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5991 }
5992
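	/* A short S/G list fits inline in the IOARCB's add_data area */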
Wayne Boyera32c0552010-02-19 13:23:36 -08005993 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5994 ioadl = ioarcb->u.add_data.u.ioadl;
5995 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5996 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005997 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5998 }
5999
6000 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6001 ioadl[i].flags_and_data_len =
6002 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6003 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6004 }
6005
6006 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6007 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006008}
6009
6010/**
Brian Kingf646f322017-03-15 16:58:39 -05006011 * __ipr_erp_done - Process completion of ERP for a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006012 * @ipr_cmd: ipr command struct
6013 *
6014 * This function copies the sense buffer into the scsi_cmd
6015 * struct and pushes the scsi_done function.
6016 *
6017 * Return value:
6018 * nothing
6019 **/
Brian Kingf646f322017-03-15 16:58:39 -05006020static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006021{
6022 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6023 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006024 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006025
6026 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6027 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06006028 scmd_printk(KERN_ERR, scsi_cmd,
6029 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006030 } else {
6031 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6032 SCSI_SENSE_BUFFERSIZE);
6033 }
6034
6035 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006036 if (!ipr_is_naca_model(res))
6037 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006038 res->in_erp = 0;
6039 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006040 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006041 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006042 if (ipr_cmd->eh_comp)
6043 complete(ipr_cmd->eh_comp);
6044 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006045}
6046
6047/**
Brian Kingf646f322017-03-15 16:58:39 -05006048 * ipr_erp_done - Process completion of ERP for a device
6049 * @ipr_cmd: ipr command struct
6050 *
6051 * This function copies the sense buffer into the scsi_cmd
6052 * struct and pushes the scsi_done function.
6053 *
6054 * Return value:
6055 * nothing
6056 **/
6057static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6058{
6059 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6060 unsigned long hrrq_flags;
6061
6062 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6063 __ipr_erp_done(ipr_cmd);
6064 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006065}
6066
6067/**
6068 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6069 * @ipr_cmd: ipr command struct
6070 *
6071 * Return value:
6072 * none
6073 **/
6074static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6075{
Brian King51b1c7e2007-03-29 12:43:50 -05006076 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006077 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08006078 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006079
6080 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08006081 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006082 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006083 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006084 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006085 ioasa->hdr.ioasc = 0;
6086 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006087
6088 if (ipr_cmd->ioa_cfg->sis64)
6089 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6090 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6091 else {
6092 ioarcb->write_ioadl_addr =
6093 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6094 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6095 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006096}
6097
6098/**
Brian Kingf646f322017-03-15 16:58:39 -05006099 * __ipr_erp_request_sense - Send request sense to a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006100 * @ipr_cmd: ipr command struct
6101 *
6102 * This function sends a request sense to a device as a result
6103 * of a check condition.
6104 *
6105 * Return value:
6106 * nothing
6107 **/
Brian Kingf646f322017-03-15 16:58:39 -05006108static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006109{
6110 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006111 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006112
6113 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
Brian Kingf646f322017-03-15 16:58:39 -05006114 __ipr_erp_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006115 return;
6116 }
6117
6118 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6119
6120 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6121 cmd_pkt->cdb[0] = REQUEST_SENSE;
6122 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6123 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6124 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6125 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6126
Wayne Boyera32c0552010-02-19 13:23:36 -08006127 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6128 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006129
6130 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6131 IPR_REQUEST_SENSE_TIMEOUT * 2);
6132}
6133
6134/**
Brian Kingf646f322017-03-15 16:58:39 -05006135 * ipr_erp_request_sense - Send request sense to a device
6136 * @ipr_cmd: ipr command struct
6137 *
6138 * This function sends a request sense to a device as a result
6139 * of a check condition.
6140 *
6141 * Return value:
6142 * nothing
6143 **/
6144static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6145{
6146 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6147 unsigned long hrrq_flags;
6148
6149 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6150 __ipr_erp_request_sense(ipr_cmd);
6151 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6152}
6153
6154/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006155 * ipr_erp_cancel_all - Send cancel all to a device
6156 * @ipr_cmd: ipr command struct
6157 *
6158 * This function sends a cancel all to a device to clear the
6159 * queue. If we are running TCQ on the device, QERR is set to 1,
6160 * which means all outstanding ops have been dropped on the floor.
6161 * Cancel all will return them to us.
6162 *
6163 * Return value:
6164 * nothing
6165 **/
6166static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6167{
6168 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6169 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6170 struct ipr_cmd_pkt *cmd_pkt;
6171
6172 res->in_erp = 1;
6173
6174 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6175
Christoph Hellwig17ea0122014-11-24 15:36:20 +01006176 if (!scsi_cmd->device->simple_tags) {
Brian Kingf646f322017-03-15 16:58:39 -05006177 __ipr_erp_request_sense(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006178 return;
6179 }
6180
6181 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6182 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6183 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6184
6185 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6186 IPR_CANCEL_ALL_TIMEOUT);
6187}
6188
6189/**
6190 * ipr_dump_ioasa - Dump contents of IOASA
6191 * @ioa_cfg: ioa config struct
6192 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06006193 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006194 *
6195 * This function is invoked by the interrupt handler when ops
6196 * fail. It will log the IOASA if appropriate. Only called
6197 * for GPDD ops.
6198 *
6199 * Return value:
6200 * none
6201 **/
6202static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06006203 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204{
6205 int i;
6206 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05006207 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006208 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006209 __be32 *ioasa_data = (__be32 *)ioasa;
6210 int error_index;
6211
Wayne Boyer96d21f02010-05-10 09:13:27 -07006212 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6213 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006214
6215 if (0 == ioasc)
6216 return;
6217
6218 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6219 return;
6220
Brian Kingb0692dd2007-03-29 12:43:09 -05006221 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6222 error_index = ipr_get_error(fd_ioasc);
6223 else
6224 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006225
6226 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6227 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07006228 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006229 return;
6230
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006231 if (!ipr_is_gscsi(res))
6232 return;
6233
Linus Torvalds1da177e2005-04-16 15:20:36 -07006234 if (ipr_error_table[error_index].log_ioasa == 0)
6235 return;
6236 }
6237
Brian Kingfe964d02006-03-29 09:37:29 -06006238 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239
Wayne Boyer96d21f02010-05-10 09:13:27 -07006240 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6241 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6242 data_len = sizeof(struct ipr_ioasa64);
6243 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006244 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006245
6246 ipr_err("IOASA Dump:\n");
6247
6248 for (i = 0; i < data_len / 4; i += 4) {
6249 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6250 be32_to_cpu(ioasa_data[i]),
6251 be32_to_cpu(ioasa_data[i+1]),
6252 be32_to_cpu(ioasa_data[i+2]),
6253 be32_to_cpu(ioasa_data[i+3]));
6254 }
6255}
6256
6257/**
6258 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6259 * @ipr_cmd: ipr command struct
6261 *
6262 * Return value:
6263 * none
6264 **/
6265static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6266{
6267 u32 failing_lba;
6268 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6269 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006270 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6271 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006272
6273 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6274
6275 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6276 return;
6277
6278 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6279
6280 if (ipr_is_vset_device(res) &&
6281 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6282 ioasa->u.vset.failing_lba_hi != 0) {
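		/*
		 * Descriptor-format sense data (response code 0x72) with an
		 * information descriptor carrying the 64-bit failing LBA.
		 */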
6283 sense_buf[0] = 0x72;
6284 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6285 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6286 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6287
6288 sense_buf[7] = 12;
6289 sense_buf[8] = 0;
6290 sense_buf[9] = 0x0A;
6291 sense_buf[10] = 0x80;
6292
6293 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6294
6295 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6296 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6297 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6298 sense_buf[15] = failing_lba & 0x000000ff;
6299
6300 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6301
6302 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6303 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6304 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6305 sense_buf[19] = failing_lba & 0x000000ff;
6306 } else {
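		/* Fixed-format sense data (response code 0x70) */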
6307 sense_buf[0] = 0x70;
6308 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6309 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6310 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6311
6312 /* Illegal request */
6313 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07006314 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006315 sense_buf[7] = 10; /* additional length */
6316
6317 /* IOARCB was in error */
6318 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6319 sense_buf[15] = 0xC0;
6320 else /* Parameter data was invalid */
6321 sense_buf[15] = 0x80;
6322
6323 sense_buf[16] =
6324 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006325 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006326 sense_buf[17] =
6327 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006328 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006329 } else {
6330 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6331 if (ipr_is_vset_device(res))
6332 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6333 else
6334 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6335
6336 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6337 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6338 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6339 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6340 sense_buf[6] = failing_lba & 0x000000ff;
6341 }
6342
6343 sense_buf[7] = 6; /* additional length */
6344 }
6345 }
6346}
6347
6348/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006349 * ipr_get_autosense - Copy autosense data to sense buffer
6350 * @ipr_cmd: ipr command struct
6351 *
6352 * This function copies the autosense buffer to the buffer
6353 * in the scsi_cmd, if there is autosense available.
6354 *
6355 * Return value:
6356 * 1 if autosense was available / 0 if not
6357 **/
6358static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6359{
Wayne Boyer96d21f02010-05-10 09:13:27 -07006360 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6361 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006362
Wayne Boyer96d21f02010-05-10 09:13:27 -07006363 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006364 return 0;
6365
Wayne Boyer96d21f02010-05-10 09:13:27 -07006366 if (ipr_cmd->ioa_cfg->sis64)
6367 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6368 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6369 SCSI_SENSE_BUFFERSIZE));
6370 else
6371 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6372 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6373 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006374 return 1;
6375}
6376
6377/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006378 * ipr_erp_start - Process an error response for a SCSI op
6379 * @ioa_cfg: ioa config struct
6380 * @ipr_cmd: ipr command struct
6381 *
6382 * This function determines whether or not to initiate ERP
6383 * on the affected device.
6384 *
6385 * Return value:
6386 * nothing
6387 **/
6388static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6389 struct ipr_cmnd *ipr_cmd)
6390{
6391 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6392 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006393 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05006394 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006395
6396 if (!res) {
Brian Kingf646f322017-03-15 16:58:39 -05006397 __ipr_scsi_eh_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006398 return;
6399 }
6400
Brian King8a048992007-04-26 16:00:10 -05006401 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006402 ipr_gen_sense(ipr_cmd);
6403
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006404 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6405
Brian King8a048992007-04-26 16:00:10 -05006406 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006407 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006408 if (ipr_is_naca_model(res))
6409 scsi_cmd->result |= (DID_ABORT << 16);
6410 else
6411 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006412 break;
6413 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006414 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006415 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6416 break;
6417 case IPR_IOASC_HW_SEL_TIMEOUT:
6418 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006419 if (!ipr_is_naca_model(res))
6420 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006421 break;
6422 case IPR_IOASC_SYNC_REQUIRED:
6423 if (!res->in_erp)
6424 res->needs_sync_complete = 1;
6425 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6426 break;
6427 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006428 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Mauricio Faria de Oliveira785a4702017-04-11 11:46:04 -03006429 /*
6430 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6431 * so SCSI mid-layer and upper layers handle it accordingly.
6432 */
6433 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6434 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006435 break;
6436 case IPR_IOASC_BUS_WAS_RESET:
6437 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6438 /*
6439 * Report the bus reset and ask for a retry. The device
6440 * will give CC/UA the next command.
6441 */
6442 if (!res->resetting_device)
6443 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6444 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006445 if (!ipr_is_naca_model(res))
6446 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006447 break;
6448 case IPR_IOASC_HW_DEV_BUS_STATUS:
6449 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6450 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006451 if (!ipr_get_autosense(ipr_cmd)) {
6452 if (!ipr_is_naca_model(res)) {
6453 ipr_erp_cancel_all(ipr_cmd);
6454 return;
6455 }
6456 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006457 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006458 if (!ipr_is_naca_model(res))
6459 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006460 break;
6461 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6462 break;
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006463 case IPR_IOASC_IR_NON_OPTIMIZED:
6464 if (res->raw_mode) {
6465 res->raw_mode = 0;
6466 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6467 } else
6468 scsi_cmd->result |= (DID_ERROR << 16);
6469 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006470 default:
Brian King5b7304f2006-08-02 14:57:51 -05006471 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6472 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006473 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006474 res->needs_sync_complete = 1;
6475 break;
6476 }
6477
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006478 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006479 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006480 if (ipr_cmd->eh_comp)
6481 complete(ipr_cmd->eh_comp);
6482 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006483}
6484
6485/**
6486 * ipr_scsi_done - mid-layer done function
6487 * @ipr_cmd: ipr command struct
6488 *
6489 * This function is invoked by the interrupt handler for
6490 * ops generated by the SCSI mid-layer
6491 *
6492 * Return value:
6493 * none
6494 **/
6495static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6496{
6497 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6498 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006499 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King36b8e182015-07-14 11:41:29 -05006500 unsigned long lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006501
Wayne Boyer96d21f02010-05-10 09:13:27 -07006502 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006503
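	/*
	 * Fast path: the op completed without error status, so finish it
	 * under the HRRQ lock alone. Otherwise take the host lock and hand
	 * the op to device error recovery (ERP).
	 */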
6504 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05006505 scsi_dma_unmap(scsi_cmd);
6506
Brian King36b8e182015-07-14 11:41:29 -05006507 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006508 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006509 if (ipr_cmd->eh_comp)
6510 complete(ipr_cmd->eh_comp);
6511 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Brian King36b8e182015-07-14 11:41:29 -05006512 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006513 } else {
Brian King36b8e182015-07-14 11:41:29 -05006514 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6515 spin_lock(&ipr_cmd->hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006516 ipr_erp_start(ioa_cfg, ipr_cmd);
Brian King36b8e182015-07-14 11:41:29 -05006517 spin_unlock(&ipr_cmd->hrrq->_lock);
6518 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006519 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006520}
6521
6522/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006523 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006524 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006525 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006526 *
6527 * This function queues a request generated by the mid-layer.
6528 *
6529 * Return value:
6530 * 0 on success
6531 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6532 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6533 **/
Brian King00bfef22012-07-17 08:13:52 -05006534static int ipr_queuecommand(struct Scsi_Host *shost,
6535 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006536{
6537 struct ipr_ioa_cfg *ioa_cfg;
6538 struct ipr_resource_entry *res;
6539 struct ipr_ioarcb *ioarcb;
6540 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006541 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006542 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006543 struct ipr_hrr_queue *hrrq;
6544 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006545
Brian King00bfef22012-07-17 08:13:52 -05006546 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6547
Linus Torvalds1da177e2005-04-16 15:20:36 -07006548 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006549 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006550
6551 if (ipr_is_gata(res) && res->sata_port) {
6552 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6553 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6554 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6555 return rc;
6556 }
6557
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006558 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6559 hrrq = &ioa_cfg->hrrq[hrrq_id];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006560
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006561 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006562 /*
6563 * We are currently blocking all devices due to a host reset
6564 * We have told the host to stop giving us new requests, but
6565 * ERP ops don't count. FIXME
6566 */
Brian Kingbfae7822013-01-30 23:45:08 -06006567 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006568 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006569 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006570 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006571
6572 /*
6573 * FIXME - Create scsi_set_host_offline interface
6574 * and the ioa_is_dead check can be removed
6575 */
Brian Kingbfae7822013-01-30 23:45:08 -06006576 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006577 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006578 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006579 }
6580
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006581 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6582 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006583 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006584 return SCSI_MLQUEUE_HOST_BUSY;
6585 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006586 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006587
Brian King172cd6e2012-07-17 08:14:40 -05006588 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006589 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006590
6591 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6592 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006593 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006594
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006595 if (ipr_is_gscsi(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006596 if (scsi_cmd->underflow == 0)
6597 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6598
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006599 if (res->reset_occurred) {
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006600 res->reset_occurred = 0;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006601 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006602 }
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006603 }
6604
6605 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6606 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6607
Linus Torvalds1da177e2005-04-16 15:20:36 -07006608 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
Christoph Hellwig50668632014-10-30 14:30:06 +01006609 if (scsi_cmd->flags & SCMD_TAGGED)
6610 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6611 else
6612 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006613 }
6614
6615 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006616 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006617 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006618 }
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006619 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006620 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006621
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006622 if (scsi_cmd->underflow == 0)
6623 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6624 }
6625
Dan Carpenterd12f1572012-07-30 11:18:22 +03006626 if (ioa_cfg->sis64)
6627 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6628 else
6629 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006630
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006631 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6632 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006633 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006634 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006635 if (!rc)
6636 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006637 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006638 }
6639
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006640 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006641 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006642 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006643 scsi_dma_unmap(scsi_cmd);
6644 goto err_nodev;
6645 }
6646
6647 ioarcb->res_handle = res->res_handle;
6648 if (res->needs_sync_complete) {
6649 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6650 res->needs_sync_complete = 0;
6651 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006652 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006653 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006654 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006655 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006656 return 0;
6657
6658err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006659 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006660 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6661 scsi_cmd->result = (DID_NO_CONNECT << 16);
6662 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006663 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006664 return 0;
6665}
6666
6667/**
Brian King35a39692006-09-25 12:39:20 -05006668 * ipr_ioctl - IOCTL handler
6669 * @sdev: scsi device struct
6670 * @cmd: IOCTL cmd
6671 * @arg: IOCTL arg
6672 *
6673 * Return value:
6674 * 0 on success / other on failure
6675 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06006676static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006677{
6678 struct ipr_resource_entry *res;
6679
6680 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006681 if (res && ipr_is_gata(res)) {
6682 if (cmd == HDIO_GET_IDENTITY)
6683 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006684 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006685 }
Brian King35a39692006-09-25 12:39:20 -05006686
6687 return -EINVAL;
6688}
6689
6690/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006691 * ipr_ioa_info - Get information about the card/driver
6692 * @host: scsi host struct
6693 *
6694 * Return value:
6695 * pointer to buffer with description string
6696 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006697static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006698{
6699 static char buffer[512];
6700 struct ipr_ioa_cfg *ioa_cfg;
6701 unsigned long lock_flags = 0;
6702
6703 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6704
6705 spin_lock_irqsave(host->host_lock, lock_flags);
6706 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6707 spin_unlock_irqrestore(host->host_lock, lock_flags);
6708
6709 return buffer;
6710}
6711
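/*
 * SCSI mid-layer host template: the entry points, error handlers and
 * queueing limits this driver registers for each adapter it claims.
 */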
6712static struct scsi_host_template driver_template = {
6713 .module = THIS_MODULE,
6714 .name = "IPR",
6715 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006716 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006717 .queuecommand = ipr_queuecommand,
6718 .eh_abort_handler = ipr_eh_abort,
6719 .eh_device_reset_handler = ipr_eh_dev_reset,
6720 .eh_host_reset_handler = ipr_eh_host_reset,
6721 .slave_alloc = ipr_slave_alloc,
6722 .slave_configure = ipr_slave_configure,
6723 .slave_destroy = ipr_slave_destroy,
Brian Kingf688f962014-12-02 12:47:37 -06006724 .scan_finished = ipr_scan_finished,
Brian King35a39692006-09-25 12:39:20 -05006725 .target_alloc = ipr_target_alloc,
6726 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006727 .change_queue_depth = ipr_change_queue_depth,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006728 .bios_param = ipr_biosparam,
6729 .can_queue = IPR_MAX_COMMANDS,
6730 .this_id = -1,
6731 .sg_tablesize = IPR_MAX_SGLIST,
6732 .max_sectors = IPR_IOA_MAX_SECTORS,
6733 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6734 .use_clustering = ENABLE_CLUSTERING,
6735 .shost_attrs = ipr_ioa_attrs,
6736 .sdev_attrs = ipr_dev_attrs,
Martin K. Petersen54b2b502013-10-23 06:25:40 -04006737 .proc_name = IPR_NAME,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006738};
6739
Brian King35a39692006-09-25 12:39:20 -05006740/**
6741 * ipr_ata_phy_reset - libata phy_reset handler
6742 * @ap: ata port to reset
6743 *
6744 **/
6745static void ipr_ata_phy_reset(struct ata_port *ap)
6746{
6747 unsigned long flags;
6748 struct ipr_sata_port *sata_port = ap->private_data;
6749 struct ipr_resource_entry *res = sata_port->res;
6750 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6751 int rc;
6752
6753 ENTER;
6754 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006755 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006756 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6757 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6758 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6759 }
6760
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006761 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006762 goto out_unlock;
6763
6764 rc = ipr_device_reset(ioa_cfg, res);
6765
6766 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006767 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006768 goto out_unlock;
6769 }
6770
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006771 ap->link.device[0].class = res->ata_class;
6772 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006773 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006774
6775out_unlock:
6776 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6777 LEAVE;
6778}
6779
6780/**
6781 * ipr_ata_post_internal - Cleanup after an internal command
6782 * @qc: ATA queued command
6783 *
6784 * Return value:
6785 * none
6786 **/
6787static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6788{
6789 struct ipr_sata_port *sata_port = qc->ap->private_data;
6790 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6791 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006792 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006793 unsigned long flags;
6794
6795 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006796 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006797 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6798 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6799 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6800 }
6801
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006802 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006803 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006804 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6805 if (ipr_cmd->qc == qc) {
6806 ipr_device_reset(ioa_cfg, sata_port->res);
6807 break;
6808 }
Brian King35a39692006-09-25 12:39:20 -05006809 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006810 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006811 }
6812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6813}
6814
6815/**
Brian King35a39692006-09-25 12:39:20 -05006816 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6817 * @regs: destination
6818 * @tf: source ATA taskfile
6819 *
6820 * Return value:
6821 * none
6822 **/
6823static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6824 struct ata_taskfile *tf)
6825{
6826 regs->feature = tf->feature;
6827 regs->nsect = tf->nsect;
6828 regs->lbal = tf->lbal;
6829 regs->lbam = tf->lbam;
6830 regs->lbah = tf->lbah;
6831 regs->device = tf->device;
6832 regs->command = tf->command;
6833 regs->hob_feature = tf->hob_feature;
6834 regs->hob_nsect = tf->hob_nsect;
6835 regs->hob_lbal = tf->hob_lbal;
6836 regs->hob_lbam = tf->hob_lbam;
6837 regs->hob_lbah = tf->hob_lbah;
6838 regs->ctl = tf->ctl;
6839}
6840
6841/**
6842 * ipr_sata_done - done function for SATA commands
6843 * @ipr_cmd: ipr command struct
6844 *
6845 * This function is invoked by the interrupt handler for
6846 * ops generated by the SCSI mid-layer to SATA devices
6847 *
6848 * Return value:
6849 * none
6850 **/
6851static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6852{
6853 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6854 struct ata_queued_cmd *qc = ipr_cmd->qc;
6855 struct ipr_sata_port *sata_port = qc->ap->private_data;
6856 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006857 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006858
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006859 spin_lock(&ipr_cmd->hrrq->_lock);
Wayne Boyer96d21f02010-05-10 09:13:27 -07006860 if (ipr_cmd->ioa_cfg->sis64)
6861 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6862 sizeof(struct ipr_ioasa_gata));
6863 else
6864 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6865 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006866 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6867
Wayne Boyer96d21f02010-05-10 09:13:27 -07006868 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006869 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006870
6871 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006872 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006873 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006874 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006875 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006876 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006877 ata_qc_complete(qc);
6878}
6879
6880/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006881 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6882 * @ipr_cmd: ipr command struct
6883 * @qc: ATA queued command
6884 *
6885 **/
6886static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6887 struct ata_queued_cmd *qc)
6888{
6889 u32 ioadl_flags = 0;
6890 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006891 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
Wayne Boyera32c0552010-02-19 13:23:36 -08006892 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6893 int len = qc->nbytes;
6894 struct scatterlist *sg;
6895 unsigned int si;
6896 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6897
6898 if (len == 0)
6899 return;
6900
6901 if (qc->dma_dir == DMA_TO_DEVICE) {
6902 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6903 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6904 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6905 ioadl_flags = IPR_IOADL_FLAGS_READ;
6906
6907 ioarcb->data_transfer_length = cpu_to_be32(len);
6908 ioarcb->ioadl_len =
6909 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6910 ioarcb->u.sis64_addr_data.data_ioadl_addr =
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006911 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
Wayne Boyera32c0552010-02-19 13:23:36 -08006912
6913 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6914 ioadl64->flags = cpu_to_be32(ioadl_flags);
6915 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6916 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6917
6918 last_ioadl64 = ioadl64;
6919 ioadl64++;
6920 }
6921
6922 if (likely(last_ioadl64))
6923 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6924}
6925
6926/**
Brian King35a39692006-09-25 12:39:20 -05006927 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6928 * @ipr_cmd: ipr command struct
6929 * @qc: ATA queued command
6930 *
6931 **/
6932static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6933 struct ata_queued_cmd *qc)
6934{
6935 u32 ioadl_flags = 0;
6936 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006937 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006938 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006939 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006940 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006941 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006942
6943 if (len == 0)
6944 return;
6945
6946 if (qc->dma_dir == DMA_TO_DEVICE) {
6947 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6948 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006949 ioarcb->data_transfer_length = cpu_to_be32(len);
6950 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006951 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6952 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6953 ioadl_flags = IPR_IOADL_FLAGS_READ;
6954 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6955 ioarcb->read_ioadl_len =
6956 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6957 }
6958
Tejun Heoff2aeb12007-12-05 16:43:11 +09006959 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006960 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6961 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006962
6963 last_ioadl = ioadl;
6964 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006965 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006966
6967 if (likely(last_ioadl))
6968 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006969}
6970
6971/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006972 * ipr_qc_defer - Get a free ipr_cmd
6973 * @qc: queued command
6974 *
6975 * Return value:
6976 * 	0 if the command can be issued / ATA_DEFER_LINK if it must be deferred
6977 **/
6978static int ipr_qc_defer(struct ata_queued_cmd *qc)
6979{
6980 struct ata_port *ap = qc->ap;
6981 struct ipr_sata_port *sata_port = ap->private_data;
6982 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6983 struct ipr_cmnd *ipr_cmd;
6984 struct ipr_hrr_queue *hrrq;
6985 int hrrq_id;
6986
6987 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6988 hrrq = &ioa_cfg->hrrq[hrrq_id];
6989
6990 qc->lldd_task = NULL;
6991 spin_lock(&hrrq->_lock);
6992 if (unlikely(hrrq->ioa_is_dead)) {
6993 spin_unlock(&hrrq->_lock);
6994 return 0;
6995 }
6996
6997 if (unlikely(!hrrq->allow_cmds)) {
6998 spin_unlock(&hrrq->_lock);
6999 return ATA_DEFER_LINK;
7000 }
7001
7002 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7003 if (ipr_cmd == NULL) {
7004 spin_unlock(&hrrq->_lock);
7005 return ATA_DEFER_LINK;
7006 }
7007
7008 qc->lldd_task = ipr_cmd;
7009 spin_unlock(&hrrq->_lock);
7010 return 0;
7011}
7012
7013/**
Brian King35a39692006-09-25 12:39:20 -05007014 * ipr_qc_issue - Issue a SATA qc to a device
7015 * @qc: queued command
7016 *
7017 * Return value:
7018 * 0 if success
7019 **/
7020static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7021{
7022 struct ata_port *ap = qc->ap;
7023 struct ipr_sata_port *sata_port = ap->private_data;
7024 struct ipr_resource_entry *res = sata_port->res;
7025 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7026 struct ipr_cmnd *ipr_cmd;
7027 struct ipr_ioarcb *ioarcb;
7028 struct ipr_ioarcb_ata_regs *regs;
7029
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007030 if (qc->lldd_task == NULL)
7031 ipr_qc_defer(qc);
7032
7033 ipr_cmd = qc->lldd_task;
7034 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05007035 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05007036
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007037 qc->lldd_task = NULL;
7038 spin_lock(&ipr_cmd->hrrq->_lock);
7039 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7040 ipr_cmd->hrrq->ioa_is_dead)) {
7041 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7042 spin_unlock(&ipr_cmd->hrrq->_lock);
7043 return AC_ERR_SYSTEM;
7044 }
7045
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007046 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05007047 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05007048
Wayne Boyera32c0552010-02-19 13:23:36 -08007049 if (ioa_cfg->sis64) {
7050 regs = &ipr_cmd->i.ata_ioadl.regs;
7051 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7052 } else
7053 regs = &ioarcb->u.add_data.u.regs;
7054
7055 memset(regs, 0, sizeof(*regs));
7056 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05007057
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007058 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05007059 ipr_cmd->qc = qc;
7060 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007061 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05007062 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7063 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7064 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01007065 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05007066
Wayne Boyera32c0552010-02-19 13:23:36 -08007067 if (ioa_cfg->sis64)
7068 ipr_build_ata_ioadl64(ipr_cmd, qc);
7069 else
7070 ipr_build_ata_ioadl(ipr_cmd, qc);
7071
Brian King35a39692006-09-25 12:39:20 -05007072 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7073 ipr_copy_sata_tf(regs, &qc->tf);
7074 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007075 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05007076
7077 switch (qc->tf.protocol) {
7078 case ATA_PROT_NODATA:
7079 case ATA_PROT_PIO:
7080 break;
7081
7082 case ATA_PROT_DMA:
7083 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7084 break;
7085
Tejun Heo0dc36882007-12-18 16:34:43 -05007086 case ATAPI_PROT_PIO:
7087 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05007088 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7089 break;
7090
Tejun Heo0dc36882007-12-18 16:34:43 -05007091 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05007092 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7093 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7094 break;
7095
7096 default:
7097 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007098 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05007099 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05007100 }
7101
Wayne Boyera32c0552010-02-19 13:23:36 -08007102 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007103 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08007104
Brian King35a39692006-09-25 12:39:20 -05007105 return 0;
7106}
7107
7108/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007109 * ipr_qc_fill_rtf - Read result TF
7110 * @qc: ATA queued command
7111 *
7112 * Return value:
7113 * true
7114 **/
7115static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7116{
7117 struct ipr_sata_port *sata_port = qc->ap->private_data;
7118 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7119 struct ata_taskfile *tf = &qc->result_tf;
7120
7121 tf->feature = g->error;
7122 tf->nsect = g->nsect;
7123 tf->lbal = g->lbal;
7124 tf->lbam = g->lbam;
7125 tf->lbah = g->lbah;
7126 tf->device = g->device;
7127 tf->command = g->status;
7128 tf->hob_nsect = g->hob_nsect;
7129 tf->hob_lbal = g->hob_lbal;
7130 tf->hob_lbam = g->hob_lbam;
7131 tf->hob_lbah = g->hob_lbah;
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007132
7133 return true;
7134}
7135
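/*
 * libata port operations for SATA devices attached behind the adapter.
 * Commands are translated into IOA ATA passthrough requests rather than
 * being issued to SATA hardware registers directly.
 */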
Brian King35a39692006-09-25 12:39:20 -05007136static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05007137 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09007138 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05007139 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05007140 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007141 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05007142 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007143 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05007144 .port_start = ata_sas_port_start,
7145 .port_stop = ata_sas_port_stop
7146};
7147
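/* Port capabilities (flags and PIO/MWDMA/UDMA modes) advertised to libata */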
7148static struct ata_port_info sata_port_info = {
Shaohua Li5067c042015-03-12 10:32:18 -07007149 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7150 ATA_FLAG_SAS_HOST,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03007151 .pio_mask = ATA_PIO4_ONLY,
7152 .mwdma_mask = ATA_MWDMA2,
7153 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05007154 .port_ops = &ipr_sata_ops
7155};
7156
Linus Torvalds1da177e2005-04-16 15:20:36 -07007157#ifdef CONFIG_PPC_PSERIES
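/*
 * PVR values of processors on which early (Gemstone revision < 3.1)
 * adapters are not supported; see ipr_invalid_adapter() below.
 */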
7158static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007159 PVR_NORTHSTAR,
7160 PVR_PULSAR,
7161 PVR_POWER4,
7162 PVR_ICESTAR,
7163 PVR_SSTAR,
7164 PVR_POWER4p,
7165 PVR_630,
7166 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07007167};
7168
7169/**
7170 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7171 * @ioa_cfg: ioa cfg struct
7172 *
7173 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7174 * certain pSeries hardware. This function determines if the given
7175 * adapter is in one of these configurations or not.
7176 *
7177 * Return value:
7178 * 1 if adapter is not supported / 0 if adapter is supported
7179 **/
7180static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7181{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007182 int i;
7183
Auke Kok44c10132007-06-08 15:46:36 -07007184 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007185 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007186 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07007187 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007188 }
7189 }
7190 return 0;
7191}
7192#else
7193#define ipr_invalid_adapter(ioa_cfg) 0
7194#endif
7195
7196/**
7197 * ipr_ioa_bringdown_done - IOA bring down completion.
7198 * @ipr_cmd: ipr command struct
7199 *
7200 * This function processes the completion of an adapter bring down.
7201 * It wakes any reset sleepers.
7202 *
7203 * Return value:
7204 * IPR_RC_JOB_RETURN
7205 **/
7206static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7207{
7208 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007209 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007210
7211 ENTER;
Brian Kingbfae7822013-01-30 23:45:08 -06007212 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7213 ipr_trace;
7214 spin_unlock_irq(ioa_cfg->host->host_lock);
7215 scsi_unblock_requests(ioa_cfg->host);
7216 spin_lock_irq(ioa_cfg->host->host_lock);
7217 }
7218
Linus Torvalds1da177e2005-04-16 15:20:36 -07007219 ioa_cfg->in_reset_reload = 0;
7220 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007221 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7222 spin_lock(&ioa_cfg->hrrq[i]._lock);
7223 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7224 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7225 }
7226 wmb();
7227
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007228 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007229 wake_up_all(&ioa_cfg->reset_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007230 LEAVE;
7231
7232 return IPR_RC_JOB_RETURN;
7233}
7234
7235/**
7236 * ipr_ioa_reset_done - IOA reset completion.
7237 * @ipr_cmd: ipr command struct
7238 *
7239 * This function processes the completion of an adapter reset.
7240 * It schedules any necessary mid-layer add/removes and
7241 * wakes any reset sleepers.
7242 *
7243 * Return value:
7244 * IPR_RC_JOB_RETURN
7245 **/
7246static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7247{
7248 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7249 struct ipr_resource_entry *res;
Brian Kingafc3f832016-08-24 12:56:51 -05007250 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007251
7252 ENTER;
7253 ioa_cfg->in_reset_reload = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007254 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7255 spin_lock(&ioa_cfg->hrrq[j]._lock);
7256 ioa_cfg->hrrq[j].allow_cmds = 1;
7257 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7258 }
7259 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007260 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06007261 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007262
7263 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Brian Kingf688f962014-12-02 12:47:37 -06007264 if (res->add_to_ml || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007265 ipr_trace;
7266 break;
7267 }
7268 }
7269 schedule_work(&ioa_cfg->work_q);
7270
Brian Kingafc3f832016-08-24 12:56:51 -05007271 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7272 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7273 if (j < IPR_NUM_LOG_HCAMS)
7274 ipr_send_hcam(ioa_cfg,
7275 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7276 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007277 else
Brian Kingafc3f832016-08-24 12:56:51 -05007278 ipr_send_hcam(ioa_cfg,
7279 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7280 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007281 }
7282
Brian King6bb04172007-04-26 16:00:08 -05007283 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007284 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7285
7286 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007287 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007288 wake_up_all(&ioa_cfg->reset_wait_q);
7289
Mark Nelson30237852008-12-10 12:23:20 +11007290 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007291 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11007292 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007293
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007294 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007295 scsi_block_requests(ioa_cfg->host);
7296
Brian Kingf688f962014-12-02 12:47:37 -06007297 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007298 LEAVE;
7299 return IPR_RC_JOB_RETURN;
7300}
7301
7302/**
7303 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7304 * @supported_dev: supported device struct
7305 * @vpids: vendor product id struct
7306 *
7307 * Return value:
7308 * none
7309 **/
7310static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7311 struct ipr_std_inq_vpids *vpids)
7312{
7313 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7314 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7315 supported_dev->num_records = 1;
7316 supported_dev->data_length =
7317 cpu_to_be16(sizeof(struct ipr_supported_device));
7318 supported_dev->reserved = 0;
7319}
7320
7321/**
7322 * ipr_set_supported_devs - Send Set Supported Devices for a device
7323 * @ipr_cmd: ipr command struct
7324 *
Wayne Boyera32c0552010-02-19 13:23:36 -08007325 * This function sends a Set Supported Devices command to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07007326 *
7327 * Return value:
7328 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7329 **/
7330static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7331{
7332 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7333 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007334 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7335 struct ipr_resource_entry *res = ipr_cmd->u.res;
7336
7337 ipr_cmd->job_step = ipr_ioa_reset_done;
7338
7339 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06007340 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007341 continue;
7342
7343 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007344 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007345
7346 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7347 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7348 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7349
7350 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007351 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007352 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7353 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7354
Wayne Boyera32c0552010-02-19 13:23:36 -08007355 ipr_init_ioadl(ipr_cmd,
7356 ioa_cfg->vpd_cbs_dma +
7357 offsetof(struct ipr_misc_cbs, supp_dev),
7358 sizeof(struct ipr_supported_device),
7359 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007360
7361 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7362 IPR_SET_SUP_DEVICE_TIMEOUT);
7363
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007364 if (!ioa_cfg->sis64)
7365 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007366 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007367 return IPR_RC_JOB_RETURN;
7368 }
7369
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007370 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007371 return IPR_RC_JOB_CONTINUE;
7372}
7373
7374/**
7375 * ipr_get_mode_page - Locate specified mode page
7376 * @mode_pages: mode page buffer
7377 * @page_code: page code to find
7378 * @len: minimum required length for mode page
7379 *
7380 * Return value:
7381 * pointer to mode page / NULL on failure
7382 **/
7383static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7384 u32 page_code, u32 len)
7385{
7386 struct ipr_mode_page_hdr *mode_hdr;
7387 u32 page_length;
7388 u32 length;
7389
7390 if (!mode_pages || (mode_pages->hdr.length == 0))
7391 return NULL;
7392
7393 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7394 mode_hdr = (struct ipr_mode_page_hdr *)
7395 (mode_pages->data + mode_pages->hdr.block_desc_len);
7396
7397 while (length) {
7398 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7399 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7400 return mode_hdr;
7401 break;
7402 } else {
7403 page_length = (sizeof(struct ipr_mode_page_hdr) +
7404 mode_hdr->page_length);
7405 length -= page_length;
7406 mode_hdr = (struct ipr_mode_page_hdr *)
7407 ((unsigned long)mode_hdr + page_length);
7408 }
7409 }
7410 return NULL;
7411}
7412
7413/**
7414 * ipr_check_term_power - Check for term power errors
7415 * @ioa_cfg: ioa config struct
7416 * @mode_pages: IOAFP mode pages buffer
7417 *
7418 * Check the IOAFP's mode page 28 for term power errors
7419 *
7420 * Return value:
7421 * nothing
7422 **/
7423static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7424 struct ipr_mode_pages *mode_pages)
7425{
7426 int i;
7427 int entry_length;
7428 struct ipr_dev_bus_entry *bus;
7429 struct ipr_mode_page28 *mode_page;
7430
7431 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7432 sizeof(struct ipr_mode_page28));
7433
7434 entry_length = mode_page->entry_length;
7435
7436 bus = mode_page->bus;
7437
7438 for (i = 0; i < mode_page->num_entries; i++) {
7439 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7440 dev_err(&ioa_cfg->pdev->dev,
7441 "Term power is absent on scsi bus %d\n",
7442 bus->res_addr.bus);
7443 }
7444
7445 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7446 }
7447}
7448
7449/**
7450 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7451 * @ioa_cfg: ioa config struct
7452 *
7453 * Looks through the config table checking for SES devices. If
7454 * an SES device is found in the SES table with a maximum SCSI
7455 * bus speed, that bus is limited to the indicated speed.
7456 *
7457 * Return value:
7458 * none
7459 **/
7460static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7461{
7462 u32 max_xfer_rate;
7463 int i;
7464
7465 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7466 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7467 ioa_cfg->bus_attr[i].bus_width);
7468
7469 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7470 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7471 }
7472}
7473
7474/**
7475 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7476 * @ioa_cfg: ioa config struct
7477 * @mode_pages: mode page 28 buffer
7478 *
7479 * Updates mode page 28 based on driver configuration
7480 *
7481 * Return value:
7482 * none
7483 **/
7484static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007485 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007486{
7487 int i, entry_length;
7488 struct ipr_dev_bus_entry *bus;
7489 struct ipr_bus_attributes *bus_attr;
7490 struct ipr_mode_page28 *mode_page;
7491
7492 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7493 sizeof(struct ipr_mode_page28));
7494
7495 entry_length = mode_page->entry_length;
7496
7497 /* Loop for each device bus entry */
7498 for (i = 0, bus = mode_page->bus;
7499 i < mode_page->num_entries;
7500 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7501 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7502 dev_err(&ioa_cfg->pdev->dev,
7503 "Invalid resource address reported: 0x%08X\n",
7504 IPR_GET_PHYS_LOC(bus->res_addr));
7505 continue;
7506 }
7507
7508 bus_attr = &ioa_cfg->bus_attr[i];
7509 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7510 bus->bus_width = bus_attr->bus_width;
7511 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7512 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7513 if (bus_attr->qas_enabled)
7514 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7515 else
7516 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7517 }
7518}
7519
7520/**
7521 * ipr_build_mode_select - Build a mode select command
7522 * @ipr_cmd: ipr command struct
7523 * @res_handle: resource handle to send command to
7524 * @parm:		Byte 2 of the Mode Select command
7525 * @dma_addr: DMA buffer address
7526 * @xfer_len: data transfer length
7527 *
7528 * Return value:
7529 * none
7530 **/
7531static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08007532 __be32 res_handle, u8 parm,
7533 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007534{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007535 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7536
7537 ioarcb->res_handle = res_handle;
7538 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7539 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7540 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7541 ioarcb->cmd_pkt.cdb[1] = parm;
7542 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7543
Wayne Boyera32c0552010-02-19 13:23:36 -08007544 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545}
7546
7547/**
7548 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7549 * @ipr_cmd: ipr command struct
7550 *
7551 * This function sets up the SCSI bus attributes and sends
7552 * a Mode Select for Page 28 to activate them.
7553 *
7554 * Return value:
7555 * IPR_RC_JOB_RETURN
7556 **/
7557static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7558{
7559 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7560 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7561 int length;
7562
7563 ENTER;
Brian King47338042006-02-08 20:57:42 -06007564 ipr_scsi_bus_speed_limit(ioa_cfg);
7565 ipr_check_term_power(ioa_cfg, mode_pages);
7566 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7567 length = mode_pages->hdr.length + 1;
7568 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007569
7570 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7571 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7572 length);
7573
Wayne Boyerf72919e2010-02-19 13:24:21 -08007574 ipr_cmd->job_step = ipr_set_supported_devs;
7575 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7576 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007577 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7578
7579 LEAVE;
7580 return IPR_RC_JOB_RETURN;
7581}
7582
7583/**
7584 * ipr_build_mode_sense - Builds a mode sense command
7585 * @ipr_cmd: ipr command struct
7586 * @res_handle:	resource handle to send command to
7587 * @parm: Byte 2 of mode sense command
7588 * @dma_addr: DMA address of mode sense buffer
7589 * @xfer_len: Size of DMA buffer
7590 *
7591 * Return value:
7592 * none
7593 **/
7594static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7595 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007596 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007597{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007598 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7599
7600 ioarcb->res_handle = res_handle;
7601 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7602 ioarcb->cmd_pkt.cdb[2] = parm;
7603 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7604 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7605
Wayne Boyera32c0552010-02-19 13:23:36 -08007606 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007607}
7608
7609/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007610 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7611 * @ipr_cmd: ipr command struct
7612 *
7613 * This function handles the failure of an IOA bringup command.
7614 *
7615 * Return value:
7616 * IPR_RC_JOB_RETURN
7617 **/
7618static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7619{
7620 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007621 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007622
7623 dev_err(&ioa_cfg->pdev->dev,
7624 "0x%02X failed with IOASC: 0x%08X\n",
7625 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7626
7627 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007628 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007629 return IPR_RC_JOB_RETURN;
7630}
7631
7632/**
7633 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7634 * @ipr_cmd: ipr command struct
7635 *
7636 * This function handles the failure of a Mode Sense to the IOAFP.
7637 * Some adapters do not handle all mode pages.
7638 *
7639 * Return value:
7640 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7641 **/
7642static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7643{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007644 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007645 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007646
7647 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007648 ipr_cmd->job_step = ipr_set_supported_devs;
7649 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7650 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007651 return IPR_RC_JOB_CONTINUE;
7652 }
7653
7654 return ipr_reset_cmd_failed(ipr_cmd);
7655}
7656
7657/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007658 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7659 * @ipr_cmd: ipr command struct
7660 *
7661 * This function sends a Page 28 mode sense to the IOA to
7662 * retrieve SCSI bus attributes.
7663 *
7664 * Return value:
7665 * IPR_RC_JOB_RETURN
7666 **/
7667static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7668{
7669 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7670
7671 ENTER;
7672 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7673 0x28, ioa_cfg->vpd_cbs_dma +
7674 offsetof(struct ipr_misc_cbs, mode_pages),
7675 sizeof(struct ipr_mode_pages));
7676
7677 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007678 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007679
7680 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7681
7682 LEAVE;
7683 return IPR_RC_JOB_RETURN;
7684}
7685
7686/**
Brian Kingac09c342007-04-26 16:00:16 -05007687 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7688 * @ipr_cmd: ipr command struct
7689 *
7690 * This function enables dual IOA RAID support if possible.
7691 *
7692 * Return value:
7693 * IPR_RC_JOB_RETURN
7694 **/
7695static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7696{
7697 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7698 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7699 struct ipr_mode_page24 *mode_page;
7700 int length;
7701
7702 ENTER;
7703 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7704 sizeof(struct ipr_mode_page24));
7705
7706 if (mode_page)
7707 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7708
7709 length = mode_pages->hdr.length + 1;
7710 mode_pages->hdr.length = 0;
7711
7712 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7713 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7714 length);
7715
7716 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7717 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7718
7719 LEAVE;
7720 return IPR_RC_JOB_RETURN;
7721}
7722
7723/**
7724 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7725 * @ipr_cmd: ipr command struct
7726 *
7727 * This function handles the failure of a Mode Sense to the IOAFP.
7728 * Some adapters do not handle all mode pages.
7729 *
7730 * Return value:
7731 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7732 **/
7733static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7734{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007735 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007736
7737 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7738 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7739 return IPR_RC_JOB_CONTINUE;
7740 }
7741
7742 return ipr_reset_cmd_failed(ipr_cmd);
7743}
7744
7745/**
7746 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7747 * @ipr_cmd: ipr command struct
7748 *
7749 * This function sends a mode sense to the IOA to retrieve
7750 * the IOA Advanced Function Control mode page.
7751 *
7752 * Return value:
7753 * IPR_RC_JOB_RETURN
7754 **/
7755static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7756{
7757 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7758
7759 ENTER;
7760 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7761 0x24, ioa_cfg->vpd_cbs_dma +
7762 offsetof(struct ipr_misc_cbs, mode_pages),
7763 sizeof(struct ipr_mode_pages));
7764
7765 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7766 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7767
7768 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7769
7770 LEAVE;
7771 return IPR_RC_JOB_RETURN;
7772}
7773
7774/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007775 * ipr_init_res_table - Initialize the resource table
7776 * @ipr_cmd: ipr command struct
7777 *
7778 * This function looks through the existing resource table, comparing
7779 * it with the config table. This function will take care of old/new
7780 * devices and schedule adding/removing them from the mid-layer
7781 * as appropriate.
7782 *
7783 * Return value:
7784 * IPR_RC_JOB_CONTINUE
7785 **/
7786static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7787{
7788 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7789 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007790 struct ipr_config_table_entry_wrapper cfgtew;
7791 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007792 LIST_HEAD(old_res);
7793
7794 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007795 if (ioa_cfg->sis64)
7796 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7797 else
7798 flag = ioa_cfg->u.cfg_table->hdr.flags;
7799
7800 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007801 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7802
7803 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7804 list_move_tail(&res->queue, &old_res);
7805
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007806 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007807 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007808 else
7809 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7810
7811 for (i = 0; i < entries; i++) {
7812 if (ioa_cfg->sis64)
7813 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7814 else
7815 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007816 found = 0;
7817
7818 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007819 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007820 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7821 found = 1;
7822 break;
7823 }
7824 }
7825
7826 if (!found) {
7827 if (list_empty(&ioa_cfg->free_res_q)) {
7828 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7829 break;
7830 }
7831
7832 found = 1;
7833 res = list_entry(ioa_cfg->free_res_q.next,
7834 struct ipr_resource_entry, queue);
7835 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007836 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007837 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007838 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7839 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007840
7841 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007842 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007843 }
7844
7845 list_for_each_entry_safe(res, temp, &old_res, queue) {
7846 if (res->sdev) {
7847 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007848 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007849 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007850 }
7851 }
7852
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007853 list_for_each_entry_safe(res, temp, &old_res, queue) {
7854 ipr_clear_res_target(res);
7855 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7856 }
7857
Brian Kingac09c342007-04-26 16:00:16 -05007858 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7859 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7860 else
7861 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007862
7863 LEAVE;
7864 return IPR_RC_JOB_CONTINUE;
7865}
7866
7867/**
7868 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7869 * @ipr_cmd: ipr command struct
7870 *
7871 * This function sends a Query IOA Configuration command
7872 * to the adapter to retrieve the IOA configuration table.
7873 *
7874 * Return value:
7875 * IPR_RC_JOB_RETURN
7876 **/
7877static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7878{
7879 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7880 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007881 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007882 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007883
7884 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007885 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7886 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007887 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7888 ucode_vpd->major_release, ucode_vpd->card_type,
7889 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7890 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7891 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7892
7893 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07007894 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007895 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7896 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007897
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007898 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007899 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007900
7901 ipr_cmd->job_step = ipr_init_res_table;
7902
7903 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7904
7905 LEAVE;
7906 return IPR_RC_JOB_RETURN;
7907}
7908
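/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA Service Action
 * @ipr_cmd:	ipr command struct
 *
 * An IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT response is treated as benign
 * (the adapter does not support the service action) and the reset job
 * continues. Any other failure is passed to ipr_reset_cmd_failed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/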
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02007909static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7910{
7911 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7912
7913 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7914 return IPR_RC_JOB_CONTINUE;
7915
7916 return ipr_reset_cmd_failed(ipr_cmd);
7917}
7918
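/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @sa_code:	service action code
 *
 * Return value:
 * 	none
 **/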
7919static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7920 __be32 res_handle, u8 sa_code)
7921{
7922 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7923
7924 ioarcb->res_handle = res_handle;
7925 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7926 ioarcb->cmd_pkt.cdb[1] = sa_code;
7927 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7928}
7929
7930/**
7931 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7932 * @ipr_cmd:	ipr command struct
7933 *
7934 * Return value:
7935 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7936 **/
7937static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7938{
7939 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7940 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7941 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7942
7943 ENTER;
7944
7945 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7946
7947 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7948 ipr_build_ioa_service_action(ipr_cmd,
7949 cpu_to_be32(IPR_IOA_RES_HANDLE),
7950 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7951
7952 ioarcb->cmd_pkt.cdb[2] = 0x40;
7953
7954 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7955 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7956 IPR_SET_SUP_DEVICE_TIMEOUT);
7957
7958 LEAVE;
7959 return IPR_RC_JOB_RETURN;
7960 }
7961
7962 LEAVE;
7963 return IPR_RC_JOB_CONTINUE;
7964}
7965
Linus Torvalds1da177e2005-04-16 15:20:36 -07007966/**
7967 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7968 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags (byte 1 of the inquiry CDB)
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the inquiry response buffer
 * @xfer_len:	size of the response buffer
7969 *
7970 * This utility function sends an inquiry to the adapter.
7971 *
7972 * Return value:
7973 * none
7974 **/
7975static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007976 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007977{
7978 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979
7980 ENTER;
7981 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7982 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7983
7984 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7985 ioarcb->cmd_pkt.cdb[1] = flags;
7986 ioarcb->cmd_pkt.cdb[2] = page;
7987 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7988
Wayne Boyera32c0552010-02-19 13:23:36 -08007989 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007990
7991 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7992 LEAVE;
7993}
7994
7995/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06007996 * ipr_inquiry_page_supported - Is the given inquiry page supported
7997 * @page0: inquiry page 0 buffer
7998 * @page: page code.
7999 *
8000 * This function determines if the specified inquiry page is supported.
8001 *
8002 * Return value:
8003 * 1 if page is supported / 0 if not
8004 **/
8005static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8006{
8007 int i;
8008
8009 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8010 if (page0->page[i] == page)
8011 return 1;
8012
8013 return 0;
8014}
8015
8016/**
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008017 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8018 * @ipr_cmd: ipr command struct
8019 *
8020 * This function sends a Page 0xC4 inquiry to the adapter
8021 * to retrieve the IOA cache capabilities page.
8022 *
8023 * Return value:
8024 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8025 **/
8026static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8027{
8028 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8029 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8030 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8031
8032 ENTER;
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02008033 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008034 memset(pageC4, 0, sizeof(*pageC4));
8035
8036 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8037 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8038 (ioa_cfg->vpd_cbs_dma
8039 + offsetof(struct ipr_misc_cbs,
8040 pageC4_data)),
8041 sizeof(struct ipr_inquiry_pageC4));
8042 return IPR_RC_JOB_RETURN;
8043 }
8044
8045 LEAVE;
8046 return IPR_RC_JOB_CONTINUE;
8047}
8048
8049/**
Brian Kingac09c342007-04-26 16:00:16 -05008050 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8051 * @ipr_cmd: ipr command struct
8052 *
8053 * This function sends a Page 0xD0 inquiry to the adapter
8054 * to retrieve adapter capabilities.
8055 *
8056 * Return value:
8057 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8058 **/
8059static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8060{
8061 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8062 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8063 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8064
8065 ENTER;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008066 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
Brian Kingac09c342007-04-26 16:00:16 -05008067 memset(cap, 0, sizeof(*cap));
8068
8069 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8070 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8071 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8072 sizeof(struct ipr_inquiry_cap));
8073 return IPR_RC_JOB_RETURN;
8074 }
8075
8076 LEAVE;
8077 return IPR_RC_JOB_CONTINUE;
8078}
8079
8080/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008081 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8082 * @ipr_cmd: ipr command struct
8083 *
8084 * This function sends a Page 3 inquiry to the adapter
8085 * to retrieve software VPD information.
8086 *
8087 * Return value:
8088 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8089 **/
8090static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8091{
8092 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008093
8094 ENTER;
8095
Brian Kingac09c342007-04-26 16:00:16 -05008096 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008097
8098 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8099 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8100 sizeof(struct ipr_inquiry_page3));
8101
8102 LEAVE;
8103 return IPR_RC_JOB_RETURN;
8104}
8105
8106/**
8107 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8108 * @ipr_cmd: ipr command struct
8109 *
8110 * This function sends a Page 0 inquiry to the adapter
8111 * to retrieve supported inquiry pages.
8112 *
8113 * Return value:
8114 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8115 **/
8116static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8117{
8118 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008119 char type[5];
8120
8121 ENTER;
8122
8123 /* Grab the type out of the VPD and store it away */
8124 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8125 type[4] = '\0';
8126 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8127
Brian Kingf688f962014-12-02 12:47:37 -06008128 if (ipr_invalid_adapter(ioa_cfg)) {
8129 dev_err(&ioa_cfg->pdev->dev,
8130 "Adapter not supported in this hardware configuration.\n");
8131
8132 if (!ipr_testmode) {
8133 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8134 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8135 list_add_tail(&ipr_cmd->queue,
8136 &ioa_cfg->hrrq->hrrq_free_q);
8137 return IPR_RC_JOB_RETURN;
8138 }
8139 }
8140
brking@us.ibm.com62275042005-11-01 17:01:14 -06008141 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008142
brking@us.ibm.com62275042005-11-01 17:01:14 -06008143 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8144 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8145 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008146
8147 LEAVE;
8148 return IPR_RC_JOB_RETURN;
8149}
8150
8151/**
8152 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8153 * @ipr_cmd: ipr command struct
8154 *
8155 * This function sends a standard inquiry to the adapter.
8156 *
8157 * Return value:
8158 * IPR_RC_JOB_RETURN
8159 **/
8160static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8161{
8162 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8163
8164 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008165 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008166
8167 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8168 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8169 sizeof(struct ipr_ioa_vpd));
8170
8171 LEAVE;
8172 return IPR_RC_JOB_RETURN;
8173}
8174
8175/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008176 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008177 * @ipr_cmd: ipr command struct
8178 *
 8179 * This function sends an Identify Host Request Response Queue
8180 * command to establish the HRRQ with the adapter.
8181 *
8182 * Return value:
8183 * IPR_RC_JOB_RETURN
8184 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08008185static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008186{
8187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8188 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008189 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008190
8191 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008192 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Brian King87adbe02016-09-16 16:51:37 -05008193 if (ioa_cfg->identify_hrrq_index == 0)
8194 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008195
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008196 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8197 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07008198
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008199 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8200 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008201
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008202 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8203 if (ioa_cfg->sis64)
8204 ioarcb->cmd_pkt.cdb[1] = 0x1;
8205
8206 if (ioa_cfg->nvectors == 1)
8207 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8208 else
8209 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8210
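		/*
		 * CDB bytes 2-5 carry bits 31-0 of the host RRQ DMA address and
		 * bytes 7-8 the queue length in bytes; on SIS-64 adapters, bytes
		 * 10-13 below supply bits 63-32 of the address.
		 */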
8211 ioarcb->cmd_pkt.cdb[2] =
8212 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8213 ioarcb->cmd_pkt.cdb[3] =
8214 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8215 ioarcb->cmd_pkt.cdb[4] =
8216 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8217 ioarcb->cmd_pkt.cdb[5] =
8218 ((u64) hrrq->host_rrq_dma) & 0xff;
8219 ioarcb->cmd_pkt.cdb[7] =
8220 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8221 ioarcb->cmd_pkt.cdb[8] =
8222 (sizeof(u32) * hrrq->size) & 0xff;
8223
8224 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008225 ioarcb->cmd_pkt.cdb[9] =
8226 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008227
8228 if (ioa_cfg->sis64) {
8229 ioarcb->cmd_pkt.cdb[10] =
8230 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8231 ioarcb->cmd_pkt.cdb[11] =
8232 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8233 ioarcb->cmd_pkt.cdb[12] =
8234 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8235 ioarcb->cmd_pkt.cdb[13] =
8236 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8237 }
8238
8239 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008240 ioarcb->cmd_pkt.cdb[14] =
8241 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008242
8243 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8244 IPR_INTERNAL_TIMEOUT);
8245
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008246 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8247 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008248
8249 LEAVE;
8250 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08008251 }
8252
Linus Torvalds1da177e2005-04-16 15:20:36 -07008253 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008254 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008255}
8256
8257/**
8258 * ipr_reset_timer_done - Adapter reset timer function
8259 * @ipr_cmd: ipr command struct
8260 *
8261 * Description: This function is used in adapter reset processing
8262 * for timing events. If the reset_cmd pointer in the IOA
 8263 * config struct is not this adapter's, we are doing nested
8264 * resets and fail_all_ops will take care of freeing the
8265 * command block.
8266 *
8267 * Return value:
8268 * none
8269 **/
8270static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8271{
8272 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8273 unsigned long lock_flags = 0;
8274
8275 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8276
8277 if (ioa_cfg->reset_cmd == ipr_cmd) {
8278 list_del(&ipr_cmd->queue);
8279 ipr_cmd->done(ipr_cmd);
8280 }
8281
8282 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8283}
8284
8285/**
8286 * ipr_reset_start_timer - Start a timer for adapter reset job
8287 * @ipr_cmd: ipr command struct
8288 * @timeout: timeout value
8289 *
8290 * Description: This function is used in adapter reset processing
8291 * for timing events. If the reset_cmd pointer in the IOA
 8292 * config struct is not this adapter's, we are doing nested
8293 * resets and fail_all_ops will take care of freeing the
8294 * command block.
8295 *
8296 * Return value:
8297 * none
8298 **/
8299static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8300 unsigned long timeout)
8301{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008302
8303 ENTER;
8304 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008305 ipr_cmd->done = ipr_reset_ioa_job;
8306
8307 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8308 ipr_cmd->timer.expires = jiffies + timeout;
8309 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8310 add_timer(&ipr_cmd->timer);
8311}
8312
8313/**
8314 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8315 * @ioa_cfg: ioa cfg struct
8316 *
8317 * Return value:
8318 * nothing
8319 **/
8320static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8321{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008322 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008323
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008324 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008325 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008326 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8327
8328 /* Initialize Host RRQ pointers */
8329 hrrq->hrrq_start = hrrq->host_rrq;
8330 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8331 hrrq->hrrq_curr = hrrq->hrrq_start;
8332 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008333 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008334 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008335 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008336
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008337 ioa_cfg->identify_hrrq_index = 0;
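	/*
	 * Seed the round-robin queue selector: with more than one HRRQ,
	 * normal command dispatch starts at queue 1, leaving queue 0 to the
	 * internal/initialization commands.
	 */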
8338 if (ioa_cfg->hrrq_num == 1)
8339 atomic_set(&ioa_cfg->hrrq_index, 0);
8340 else
8341 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008342
8343 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008344 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008345}
8346
8347/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008348 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8349 * @ipr_cmd: ipr command struct
8350 *
8351 * Return value:
8352 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8353 **/
8354static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8355{
8356 unsigned long stage, stage_time;
8357 u32 feedback;
8358 volatile u32 int_reg;
8359 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8360 u64 maskval = 0;
8361
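	/*
	 * The init feedback register packs the current IPL stage together with
	 * a per-stage timeout (in seconds) bounding how long we wait for the
	 * next stage change.
	 */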
8362 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8363 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8364 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8365
8366 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8367
8368 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07008369 if (stage_time == 0)
8370 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8371 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08008372 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8373 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8374 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8375
8376 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8377 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8378 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8379 stage_time = ioa_cfg->transop_timeout;
8380 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8381 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07008382 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8383 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8384 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8385 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8386 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8387 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8388 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8389 return IPR_RC_JOB_CONTINUE;
8390 }
Wayne Boyer214777b2010-02-19 13:24:26 -08008391 }
8392
8393 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8394 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8395 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8396 ipr_cmd->done = ipr_reset_ioa_job;
8397 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008398
8399 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08008400
8401 return IPR_RC_JOB_RETURN;
8402}
8403
8404/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008405 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8406 * @ipr_cmd: ipr command struct
8407 *
8408 * This function reinitializes some control blocks and
8409 * enables destructive diagnostics on the adapter.
8410 *
8411 * Return value:
8412 * IPR_RC_JOB_RETURN
8413 **/
8414static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8415{
8416 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8417 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07008418 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008419 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008420
8421 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08008422 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008423 ipr_init_ioa_mem(ioa_cfg);
8424
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008425 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8426 spin_lock(&ioa_cfg->hrrq[i]._lock);
8427 ioa_cfg->hrrq[i].allow_interrupts = 1;
8428 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8429 }
8430 wmb();
Wayne Boyer8701f182010-06-04 10:26:50 -07008431 if (ioa_cfg->sis64) {
8432 /* Set the adapter to the correct endian mode. */
8433 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8434 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8435 }
8436
Wayne Boyer7be96902010-05-10 09:14:07 -07008437 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008438
8439 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8440 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08008441 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008442 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8443 return IPR_RC_JOB_CONTINUE;
8444 }
8445
8446 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08008447 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008448
Wayne Boyer7be96902010-05-10 09:14:07 -07008449 if (ioa_cfg->sis64) {
8450 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8451 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8452 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8453 } else
8454 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08008455
Linus Torvalds1da177e2005-04-16 15:20:36 -07008456 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8457
8458 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8459
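	/*
	 * SIS-64 adapters report bring-up progress through IPL stages, so hand
	 * off to ipr_reset_next_stage; older adapters simply get the operational
	 * timeout armed below.
	 */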
Wayne Boyer214777b2010-02-19 13:24:26 -08008460 if (ioa_cfg->sis64) {
8461 ipr_cmd->job_step = ipr_reset_next_stage;
8462 return IPR_RC_JOB_CONTINUE;
8463 }
8464
Linus Torvalds1da177e2005-04-16 15:20:36 -07008465 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
Brian King5469cb52007-03-29 12:42:40 -05008466 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008467 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8468 ipr_cmd->done = ipr_reset_ioa_job;
8469 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008470 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008471
8472 LEAVE;
8473 return IPR_RC_JOB_RETURN;
8474}
8475
8476/**
8477 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8478 * @ipr_cmd: ipr command struct
8479 *
8480 * This function is invoked when an adapter dump has run out
8481 * of processing time.
8482 *
8483 * Return value:
8484 * IPR_RC_JOB_CONTINUE
8485 **/
8486static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8487{
8488 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8489
8490 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05008491 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8492 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008493 ioa_cfg->sdt_state = ABORT_DUMP;
8494
Brian King4c647e92011-10-15 09:08:56 -05008495 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008496 ipr_cmd->job_step = ipr_reset_alert;
8497
8498 return IPR_RC_JOB_CONTINUE;
8499}
8500
8501/**
8502 * ipr_unit_check_no_data - Log a unit check/no data error log
8503 * @ioa_cfg: ioa config struct
8504 *
8505 * Logs an error indicating the adapter unit checked, but for some
8506 * reason, we were unable to fetch the unit check buffer.
8507 *
8508 * Return value:
8509 * nothing
8510 **/
8511static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8512{
8513 ioa_cfg->errors_logged++;
8514 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8515}
8516
8517/**
8518 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8519 * @ioa_cfg: ioa config struct
8520 *
8521 * Fetches the unit check buffer from the adapter by clocking the data
8522 * through the mailbox register.
8523 *
8524 * Return value:
8525 * nothing
8526 **/
8527static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8528{
8529 unsigned long mailbox;
8530 struct ipr_hostrcb *hostrcb;
8531 struct ipr_uc_sdt sdt;
8532 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05008533 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008534
8535 mailbox = readl(ioa_cfg->ioa_mailbox);
8536
Wayne Boyerdcbad002010-02-19 13:24:14 -08008537 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008538 ipr_unit_check_no_data(ioa_cfg);
8539 return;
8540 }
8541
8542 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8543 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8544 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8545
Wayne Boyerdcbad002010-02-19 13:24:14 -08008546 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8547 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8548 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008549 ipr_unit_check_no_data(ioa_cfg);
8550 return;
8551 }
8552
8553 /* Find length of the first sdt entry (UC buffer) */
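	/* Format 3 SDTs report the length directly; format 2 reports start/end addresses */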
Wayne Boyerdcbad002010-02-19 13:24:14 -08008554 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8555 length = be32_to_cpu(sdt.entry[0].end_token);
8556 else
8557 length = (be32_to_cpu(sdt.entry[0].end_token) -
8558 be32_to_cpu(sdt.entry[0].start_token)) &
8559 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008560
8561 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8562 struct ipr_hostrcb, queue);
Brian Kingafc3f832016-08-24 12:56:51 -05008563 list_del_init(&hostrcb->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008564 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8565
8566 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08008567 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07008568 (__be32 *)&hostrcb->hcam,
8569 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8570
Brian King65f56472007-04-26 16:00:12 -05008571 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008572 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08008573 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05008574 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8575 ioa_cfg->sdt_state == GET_DUMP)
8576 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8577 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008578 ipr_unit_check_no_data(ioa_cfg);
8579
8580 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8581}
8582
8583/**
Wayne Boyer110def82010-11-04 09:36:16 -07008584 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8585 * @ipr_cmd: ipr command struct
8586 *
 8587 * Description: This function retrieves the unit check buffer from the adapter.
8588 *
8589 * Return value:
8590 * IPR_RC_JOB_RETURN
8591 **/
8592static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8593{
8594 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8595
8596 ENTER;
8597 ioa_cfg->ioa_unit_checked = 0;
8598 ipr_get_unit_check_buffer(ioa_cfg);
8599 ipr_cmd->job_step = ipr_reset_alert;
8600 ipr_reset_start_timer(ipr_cmd, 0);
8601
8602 LEAVE;
8603 return IPR_RC_JOB_RETURN;
8604}
8605
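/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd: ipr command struct
 *
 * Description: On SIS-64 adapters, poll (re-arming a short timer) until the
 * mailbox register reports stable or the allotted time expires, then move the
 * dump state to READ_DUMP and schedule the dump worker.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/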
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008606static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8607{
8608 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8609
8610 ENTER;
8611
8612 if (ioa_cfg->sdt_state != GET_DUMP)
8613 return IPR_RC_JOB_RETURN;
8614
8615 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8616 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8617 IPR_PCII_MAILBOX_STABLE)) {
8618
8619 if (!ipr_cmd->u.time_left)
8620 dev_err(&ioa_cfg->pdev->dev,
8621 "Timed out waiting for Mailbox register.\n");
8622
8623 ioa_cfg->sdt_state = READ_DUMP;
8624 ioa_cfg->dump_timeout = 0;
8625 if (ioa_cfg->sis64)
8626 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8627 else
8628 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8629 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8630 schedule_work(&ioa_cfg->work_q);
8631
8632 } else {
8633 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8634 ipr_reset_start_timer(ipr_cmd,
8635 IPR_CHECK_FOR_RESET_TIMEOUT);
8636 }
8637
8638 LEAVE;
8639 return IPR_RC_JOB_RETURN;
8640}
8641
Wayne Boyer110def82010-11-04 09:36:16 -07008642/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008643 * ipr_reset_restore_cfg_space - Restore PCI config space.
8644 * @ipr_cmd: ipr command struct
8645 *
8646 * Description: This function restores the saved PCI config space of
8647 * the adapter, fails all outstanding ops back to the callers, and
8648 * fetches the dump/unit check if applicable to this reset.
8649 *
8650 * Return value:
8651 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8652 **/
8653static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8654{
8655 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07008656 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008657
8658 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02008659 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06008660 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008661
8662 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008663 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008664 return IPR_RC_JOB_CONTINUE;
8665 }
8666
8667 ipr_fail_all_ops(ioa_cfg);
8668
Wayne Boyer8701f182010-06-04 10:26:50 -07008669 if (ioa_cfg->sis64) {
8670 /* Set the adapter to the correct endian mode. */
8671 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8672 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8673 }
8674
Linus Torvalds1da177e2005-04-16 15:20:36 -07008675 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07008676 if (ioa_cfg->sis64) {
8677 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8678 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8679 return IPR_RC_JOB_RETURN;
8680 } else {
8681 ioa_cfg->ioa_unit_checked = 0;
8682 ipr_get_unit_check_buffer(ioa_cfg);
8683 ipr_cmd->job_step = ipr_reset_alert;
8684 ipr_reset_start_timer(ipr_cmd, 0);
8685 return IPR_RC_JOB_RETURN;
8686 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008687 }
8688
8689 if (ioa_cfg->in_ioa_bringdown) {
8690 ipr_cmd->job_step = ipr_ioa_bringdown_done;
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008691 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8692 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8693 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008694 } else {
8695 ipr_cmd->job_step = ipr_reset_enable_ioa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008696 }
8697
Wayne Boyer438b0332010-05-10 09:13:00 -07008698 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008699 return IPR_RC_JOB_CONTINUE;
8700}
8701
8702/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008703 * ipr_reset_bist_done - BIST has completed on the adapter.
8704 * @ipr_cmd: ipr command struct
8705 *
8706 * Description: Unblock config space and resume the reset process.
8707 *
8708 * Return value:
8709 * IPR_RC_JOB_CONTINUE
8710 **/
8711static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8712{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008713 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8714
Brian Kinge619e1a2007-01-23 11:25:37 -06008715 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008716 if (ioa_cfg->cfg_locked)
8717 pci_cfg_access_unlock(ioa_cfg->pdev);
8718 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008719 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8720 LEAVE;
8721 return IPR_RC_JOB_CONTINUE;
8722}
8723
8724/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008725 * ipr_reset_start_bist - Run BIST on the adapter.
8726 * @ipr_cmd: ipr command struct
8727 *
8728 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8729 *
8730 * Return value:
8731 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8732 **/
8733static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8734{
8735 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008736 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008737
8738 ENTER;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008739 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8740 writel(IPR_UPROCI_SIS64_START_BIST,
8741 ioa_cfg->regs.set_uproc_interrupt_reg32);
8742 else
8743 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8744
8745 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008746 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008747 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8748 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008749 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008750 if (ioa_cfg->cfg_locked)
8751 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8752 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008753 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8754 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008755 }
8756
8757 LEAVE;
8758 return rc;
8759}
8760
8761/**
Brian King463fc692007-05-07 17:09:05 -05008762 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8763 * @ipr_cmd: ipr command struct
8764 *
 8765 * Description: This delays two seconds following PCI reset of the adapter before proceeding.
8766 *
8767 * Return value:
8768 * IPR_RC_JOB_RETURN
8769 **/
8770static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8771{
8772 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008773 ipr_cmd->job_step = ipr_reset_bist_done;
8774 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8775 LEAVE;
8776 return IPR_RC_JOB_RETURN;
8777}
8778
8779/**
Brian King2796ca52015-03-26 11:23:52 -05008780 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8781 * @work: work struct
8782 *
 8783 * Description: This pulses a warm PCI reset to the slot.
8784 *
8785 **/
8786static void ipr_reset_reset_work(struct work_struct *work)
8787{
8788 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8789 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8790 struct pci_dev *pdev = ioa_cfg->pdev;
8791 unsigned long lock_flags = 0;
8792
8793 ENTER;
8794 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8795 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8796 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8797
8798 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8799 if (ioa_cfg->reset_cmd == ipr_cmd)
8800 ipr_reset_ioa_job(ipr_cmd);
8801 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8802 LEAVE;
8803}
8804
8805/**
Brian King463fc692007-05-07 17:09:05 -05008806 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8807 * @ipr_cmd: ipr command struct
8808 *
 8809 * Description: This queues work to assert and then deassert PCI reset to the adapter.
8810 *
8811 * Return value:
8812 * IPR_RC_JOB_RETURN
8813 **/
8814static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8815{
8816 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Brian King463fc692007-05-07 17:09:05 -05008817
8818 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05008819 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8820 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
Brian King463fc692007-05-07 17:09:05 -05008821 ipr_cmd->job_step = ipr_reset_slot_reset_done;
Brian King463fc692007-05-07 17:09:05 -05008822 LEAVE;
8823 return IPR_RC_JOB_RETURN;
8824}
8825
8826/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008827 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8828 * @ipr_cmd: ipr command struct
8829 *
8830 * Description: This attempts to block config access to the IOA.
8831 *
8832 * Return value:
8833 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8834 **/
8835static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8836{
8837 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8838 int rc = IPR_RC_JOB_CONTINUE;
8839
8840 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8841 ioa_cfg->cfg_locked = 1;
8842 ipr_cmd->job_step = ioa_cfg->reset;
8843 } else {
8844 if (ipr_cmd->u.time_left) {
8845 rc = IPR_RC_JOB_RETURN;
8846 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8847 ipr_reset_start_timer(ipr_cmd,
8848 IPR_CHECK_FOR_RESET_TIMEOUT);
8849 } else {
8850 ipr_cmd->job_step = ioa_cfg->reset;
8851 dev_err(&ioa_cfg->pdev->dev,
8852 "Timed out waiting to lock config access. Resetting anyway.\n");
8853 }
8854 }
8855
8856 return rc;
8857}
8858
8859/**
8860 * ipr_reset_block_config_access - Block config access to the IOA
8861 * @ipr_cmd: ipr command struct
8862 *
8863 * Description: This attempts to block config access to the IOA
8864 *
8865 * Return value:
8866 * IPR_RC_JOB_CONTINUE
8867 **/
8868static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8869{
8870 ipr_cmd->ioa_cfg->cfg_locked = 0;
8871 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8872 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8873 return IPR_RC_JOB_CONTINUE;
8874}
8875
8876/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008877 * ipr_reset_allowed - Query whether or not IOA can be reset
8878 * @ioa_cfg: ioa config struct
8879 *
8880 * Return value:
8881 * 0 if reset not allowed / non-zero if reset is allowed
8882 **/
8883static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8884{
8885 volatile u32 temp_reg;
8886
8887 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8888 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8889}
8890
8891/**
8892 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8893 * @ipr_cmd: ipr command struct
8894 *
8895 * Description: This function waits for adapter permission to run BIST,
8896 * then runs BIST. If the adapter does not give permission after a
8897 * reasonable time, we will reset the adapter anyway. The impact of
8898 * resetting the adapter without warning the adapter is the risk of
8899 * losing the persistent error log on the adapter. If the adapter is
8900 * reset while it is writing to the flash on the adapter, the flash
8901 * segment will have bad ECC and be zeroed.
8902 *
8903 * Return value:
8904 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8905 **/
8906static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8907{
8908 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8909 int rc = IPR_RC_JOB_RETURN;
8910
8911 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8912 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8913 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8914 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008915 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008916 rc = IPR_RC_JOB_CONTINUE;
8917 }
8918
8919 return rc;
8920}
8921
8922/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008923 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008924 * @ipr_cmd: ipr command struct
8925 *
8926 * Description: This function alerts the adapter that it will be reset.
8927 * If memory space is not currently enabled, proceed directly
8928 * to running BIST on the adapter. The timer must always be started
8929 * so we guarantee we do not run BIST from ipr_isr.
8930 *
8931 * Return value:
8932 * IPR_RC_JOB_RETURN
8933 **/
8934static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8935{
8936 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8937 u16 cmd_reg;
8938 int rc;
8939
8940 ENTER;
8941 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8942
8943 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8944 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008945 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008946 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8947 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008948 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008949 }
8950
8951 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8952 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8953
8954 LEAVE;
8955 return IPR_RC_JOB_RETURN;
8956}
8957
8958/**
Brian King4fdd7c72015-03-26 11:23:50 -05008959 * ipr_reset_quiesce_done - Complete IOA disconnect
8960 * @ipr_cmd: ipr command struct
8961 *
8962 * Description: Freeze the adapter to complete quiesce processing
8963 *
8964 * Return value:
8965 * IPR_RC_JOB_CONTINUE
8966 **/
8967static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8968{
8969 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8970
8971 ENTER;
8972 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8973 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8974 LEAVE;
8975 return IPR_RC_JOB_CONTINUE;
8976}
8977
8978/**
8979 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8980 * @ipr_cmd: ipr command struct
8981 *
8982 * Description: Ensure nothing is outstanding to the IOA and
8983 * proceed with IOA disconnect. Otherwise reset the IOA.
8984 *
8985 * Return value:
8986 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8987 **/
8988static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8989{
8990 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8991 struct ipr_cmnd *loop_cmd;
8992 struct ipr_hrr_queue *hrrq;
8993 int rc = IPR_RC_JOB_CONTINUE;
8994 int count = 0;
8995
8996 ENTER;
8997 ipr_cmd->job_step = ipr_reset_quiesce_done;
8998
8999 for_each_hrrq(hrrq, ioa_cfg) {
9000 spin_lock(&hrrq->_lock);
9001 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9002 count++;
9003 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9004 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9005 rc = IPR_RC_JOB_RETURN;
9006 break;
9007 }
9008 spin_unlock(&hrrq->_lock);
9009
9010 if (count)
9011 break;
9012 }
9013
9014 LEAVE;
9015 return rc;
9016}
9017
9018/**
9019 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9020 * @ipr_cmd: ipr command struct
9021 *
 9022 * Description: Cancel any outstanding HCAMs to the IOA.
9023 *
9024 * Return value:
9025 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9026 **/
9027static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9028{
9029 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9030 int rc = IPR_RC_JOB_CONTINUE;
9031 struct ipr_cmd_pkt *cmd_pkt;
9032 struct ipr_cmnd *hcam_cmd;
9033 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9034
9035 ENTER;
9036 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9037
9038 if (!hrrq->ioa_is_dead) {
9039 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9040 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9041 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9042 continue;
9043
9044 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9045 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9046 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9047 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9048 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9049 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
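				/*
				 * CDB bytes 2-5 carry bits 31-0 and bytes 10-13 bits 63-32
				 * of the HCAM command block's DMA address.
				 */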
9050 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9051 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9052 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9053 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9054 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9055 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9056 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9057 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9058
9059 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9060 IPR_CANCEL_TIMEOUT);
9061
9062 rc = IPR_RC_JOB_RETURN;
9063 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9064 break;
9065 }
9066 }
9067 } else
9068 ipr_cmd->job_step = ipr_reset_alert;
9069
9070 LEAVE;
9071 return rc;
9072}
9073
9074/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009075 * ipr_reset_ucode_download_done - Microcode download completion
9076 * @ipr_cmd: ipr command struct
9077 *
9078 * Description: This function unmaps the microcode download buffer.
9079 *
9080 * Return value:
9081 * IPR_RC_JOB_CONTINUE
9082 **/
9083static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9084{
9085 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9086 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9087
Anton Blanchardd73341b2014-10-30 17:27:08 -05009088 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009089 sglist->num_sg, DMA_TO_DEVICE);
9090
9091 ipr_cmd->job_step = ipr_reset_alert;
9092 return IPR_RC_JOB_CONTINUE;
9093}
9094
9095/**
9096 * ipr_reset_ucode_download - Download microcode to the adapter
9097 * @ipr_cmd: ipr command struct
9098 *
 9099 * Description: This function checks to see if there is microcode
9100 * to download to the adapter. If there is, a download is performed.
9101 *
9102 * Return value:
9103 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9104 **/
9105static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9106{
9107 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9108 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9109
9110 ENTER;
9111 ipr_cmd->job_step = ipr_reset_alert;
9112
9113 if (!sglist)
9114 return IPR_RC_JOB_CONTINUE;
9115
9116 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9117 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9118 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9119 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9120 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9121 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9122 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9123
Wayne Boyera32c0552010-02-19 13:23:36 -08009124 if (ioa_cfg->sis64)
9125 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9126 else
9127 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009128 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9129
9130 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9131 IPR_WRITE_BUFFER_TIMEOUT);
9132
9133 LEAVE;
9134 return IPR_RC_JOB_RETURN;
9135}
9136
9137/**
9138 * ipr_reset_shutdown_ioa - Shutdown the adapter
9139 * @ipr_cmd: ipr command struct
9140 *
9141 * Description: This function issues an adapter shutdown of the
9142 * specified type to the specified adapter as part of the
9143 * adapter reset job.
9144 *
9145 * Return value:
9146 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9147 **/
9148static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9149{
9150 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9151 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9152 unsigned long timeout;
9153 int rc = IPR_RC_JOB_CONTINUE;
9154
9155 ENTER;
Brian King4fdd7c72015-03-26 11:23:50 -05009156 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9157 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9158 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009159 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009160 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9161 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9162 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9163 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9164
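		/*
		 * Pick a timeout to match the shutdown type; dual IOA RAID
		 * configurations get their own abbreviated value.
		 */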
Brian Kingac09c342007-04-26 16:00:16 -05009165 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9166 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009167 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9168 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05009169 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9170 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009171 else
Brian Kingac09c342007-04-26 16:00:16 -05009172 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009173
9174 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9175
9176 rc = IPR_RC_JOB_RETURN;
9177 ipr_cmd->job_step = ipr_reset_ucode_download;
9178 } else
9179 ipr_cmd->job_step = ipr_reset_alert;
9180
9181 LEAVE;
9182 return rc;
9183}
9184
9185/**
9186 * ipr_reset_ioa_job - Adapter reset job
9187 * @ipr_cmd: ipr command struct
9188 *
9189 * Description: This function is the job router for the adapter reset job.
9190 *
9191 * Return value:
9192 * none
9193 **/
9194static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9195{
9196 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009197 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9198
9199 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07009200 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009201
9202 if (ioa_cfg->reset_cmd != ipr_cmd) {
9203 /*
9204 * We are doing nested adapter resets and this is
9205 * not the current reset job.
9206 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009207 list_add_tail(&ipr_cmd->queue,
9208 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009209 return;
9210 }
9211
9212 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009213 rc = ipr_cmd->job_step_failed(ipr_cmd);
9214 if (rc == IPR_RC_JOB_RETURN)
9215 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009216 }
9217
9218 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009219 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009220 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009221 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009222}
9223
9224/**
9225 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9226 * @ioa_cfg: ioa config struct
9227 * @job_step: first job step of reset job
9228 * @shutdown_type: shutdown type
9229 *
9230 * Description: This function will initiate the reset of the given adapter
9231 * starting at the selected job step.
9232 * If the caller needs to wait on the completion of the reset,
9233 * the caller must sleep on the reset_wait_q.
9234 *
9235 * Return value:
9236 * none
9237 **/
9238static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9239 int (*job_step) (struct ipr_cmnd *),
9240 enum ipr_shutdown_type shutdown_type)
9241{
9242 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009243 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009244
9245 ioa_cfg->in_reset_reload = 1;
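	/* Stop new commands on every HRRQ before starting the reset job */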
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009246 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9247 spin_lock(&ioa_cfg->hrrq[i]._lock);
9248 ioa_cfg->hrrq[i].allow_cmds = 0;
9249 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9250 }
9251 wmb();
Brian Kingbfae7822013-01-30 23:45:08 -06009252 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
9253 scsi_block_requests(ioa_cfg->host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009254
9255 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9256 ioa_cfg->reset_cmd = ipr_cmd;
9257 ipr_cmd->job_step = job_step;
9258 ipr_cmd->u.shutdown_type = shutdown_type;
9259
9260 ipr_reset_ioa_job(ipr_cmd);
9261}
9262
9263/**
9264 * ipr_initiate_ioa_reset - Initiate an adapter reset
9265 * @ioa_cfg: ioa config struct
9266 * @shutdown_type: shutdown type
9267 *
9268 * Description: This function will initiate the reset of the given adapter.
9269 * If the caller needs to wait on the completion of the reset,
9270 * the caller must sleep on the reset_wait_q.
9271 *
9272 * Return value:
9273 * none
9274 **/
9275static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9276 enum ipr_shutdown_type shutdown_type)
9277{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009278 int i;
9279
9280 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009281 return;
9282
Brian King41e9a692011-09-21 08:51:11 -05009283 if (ioa_cfg->in_reset_reload) {
9284 if (ioa_cfg->sdt_state == GET_DUMP)
9285 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9286 else if (ioa_cfg->sdt_state == READ_DUMP)
9287 ioa_cfg->sdt_state = ABORT_DUMP;
9288 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009289
9290 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9291 dev_err(&ioa_cfg->pdev->dev,
9292 "IOA taken offline - error recovery failed\n");
9293
9294 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009295 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9296 spin_lock(&ioa_cfg->hrrq[i]._lock);
9297 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9298 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9299 }
9300 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009301
9302 if (ioa_cfg->in_ioa_bringdown) {
9303 ioa_cfg->reset_cmd = NULL;
9304 ioa_cfg->in_reset_reload = 0;
9305 ipr_fail_all_ops(ioa_cfg);
9306 wake_up_all(&ioa_cfg->reset_wait_q);
9307
Brian Kingbfae7822013-01-30 23:45:08 -06009308 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9309 spin_unlock_irq(ioa_cfg->host->host_lock);
9310 scsi_unblock_requests(ioa_cfg->host);
9311 spin_lock_irq(ioa_cfg->host->host_lock);
9312 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009313 return;
9314 } else {
9315 ioa_cfg->in_ioa_bringdown = 1;
9316 shutdown_type = IPR_SHUTDOWN_NONE;
9317 }
9318 }
9319
9320 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9321 shutdown_type);
9322}
9323
9324/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009325 * ipr_reset_freeze - Hold off all I/O activity
9326 * @ipr_cmd: ipr command struct
9327 *
9328 * Description: If the PCI slot is frozen, hold off all I/O
9329 * activity; then, as soon as the slot is available again,
9330 * initiate an adapter reset.
9331 */
9332static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9333{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9335 int i;
9336
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009337 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009338 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9339 spin_lock(&ioa_cfg->hrrq[i]._lock);
9340 ioa_cfg->hrrq[i].allow_interrupts = 0;
9341 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9342 }
9343 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009344 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009345 ipr_cmd->done = ipr_reset_ioa_job;
9346 return IPR_RC_JOB_RETURN;
9347}
9348
9349/**
Brian King6270e592014-01-21 12:16:41 -06009350 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9351 * @pdev: PCI device struct
9352 *
9353 * Description: This routine is called to tell us that the MMIO
9354 * access to the IOA has been restored
9355 */
9356static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9357{
9358 unsigned long flags = 0;
9359 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9360
9361 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9362 if (!ioa_cfg->probe_done)
9363 pci_save_state(pdev);
9364 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9365 return PCI_ERS_RESULT_NEED_RESET;
9366}
9367
9368/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009369 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9370 * @pdev: PCI device struct
9371 *
9372 * Description: This routine is called to tell us that the PCI bus
9373 * is down. Can't do anything here, except put the device driver
9374 * into a holding pattern, waiting for the PCI bus to come back.
9375 */
9376static void ipr_pci_frozen(struct pci_dev *pdev)
9377{
9378 unsigned long flags = 0;
9379 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9380
9381 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009382 if (ioa_cfg->probe_done)
9383 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9385}
9386
9387/**
9388 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9389 * @pdev: PCI device struct
9390 *
9391 * Description: This routine is called by the pci error recovery
9392 * code after the PCI slot has been reset, just before we
9393 * should resume normal operations.
9394 */
9395static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9396{
9397 unsigned long flags = 0;
9398 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9399
9400 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009401 if (ioa_cfg->probe_done) {
9402 if (ioa_cfg->needs_warm_reset)
9403 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9404 else
9405 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9406 IPR_SHUTDOWN_NONE);
9407 } else
9408 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9410 return PCI_ERS_RESULT_RECOVERED;
9411}
9412
9413/**
9414 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9415 * @pdev: PCI device struct
9416 *
9417 * Description: This routine is called when the PCI bus has
9418 * permanently failed.
9419 */
9420static void ipr_pci_perm_failure(struct pci_dev *pdev)
9421{
9422 unsigned long flags = 0;
9423 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009424 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009425
9426 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009427 if (ioa_cfg->probe_done) {
9428 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9429 ioa_cfg->sdt_state = ABORT_DUMP;
9430 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9431 ioa_cfg->in_ioa_bringdown = 1;
9432 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9433 spin_lock(&ioa_cfg->hrrq[i]._lock);
9434 ioa_cfg->hrrq[i].allow_cmds = 0;
9435 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9436 }
9437 wmb();
9438 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9439 } else
9440 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009441 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9442}
9443
9444/**
9445 * ipr_pci_error_detected - Called when a PCI error is detected.
9446 * @pdev: PCI device struct
9447 * @state: PCI channel state
9448 *
9449 * Description: Called when a PCI error is detected.
9450 *
9451 * Return value:
 9452 * PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9453 */
9454static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9455 pci_channel_state_t state)
9456{
9457 switch (state) {
9458 case pci_channel_io_frozen:
9459 ipr_pci_frozen(pdev);
Brian King6270e592014-01-21 12:16:41 -06009460 return PCI_ERS_RESULT_CAN_RECOVER;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009461 case pci_channel_io_perm_failure:
9462 ipr_pci_perm_failure(pdev);
9463 return PCI_ERS_RESULT_DISCONNECT;
9464 break;
9465 default:
9466 break;
9467 }
9468 return PCI_ERS_RESULT_NEED_RESET;
9469}
9470
9471/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009472 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9473 * @ioa_cfg: ioa cfg struct
9474 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08009475 * Description: This is the second phase of adapter initialization.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009476 * This function takes care of initializing the adapter to the point
9477 * where it can accept new commands.
 9478 *
9479 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02009480 * 0 on success
Linus Torvalds1da177e2005-04-16 15:20:36 -07009481 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009482static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009483{
9484 int rc = 0;
9485 unsigned long host_lock_flags = 0;
9486
9487 ENTER;
9488 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9489 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
Brian King6270e592014-01-21 12:16:41 -06009490 ioa_cfg->probe_done = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009491 if (ioa_cfg->needs_hard_reset) {
9492 ioa_cfg->needs_hard_reset = 0;
9493 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9494 } else
9495 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9496 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009497 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009498
9499 LEAVE;
9500 return rc;
9501}
9502
9503/**
9504 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9505 * @ioa_cfg: ioa config struct
9506 *
9507 * Return value:
9508 * none
9509 **/
9510static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9511{
9512 int i;
9513
Brian Kinga65e8f12015-03-26 11:23:55 -05009514 if (ioa_cfg->ipr_cmnd_list) {
9515 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9516 if (ioa_cfg->ipr_cmnd_list[i])
9517 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9518 ioa_cfg->ipr_cmnd_list[i],
9519 ioa_cfg->ipr_cmnd_list_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009520
Brian Kinga65e8f12015-03-26 11:23:55 -05009521 ioa_cfg->ipr_cmnd_list[i] = NULL;
9522 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009523 }
9524
9525 if (ioa_cfg->ipr_cmd_pool)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009526 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009527
Brian King89aad422012-03-14 21:20:10 -05009528 kfree(ioa_cfg->ipr_cmnd_list);
9529 kfree(ioa_cfg->ipr_cmnd_list_dma);
9530 ioa_cfg->ipr_cmnd_list = NULL;
9531 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009532 ioa_cfg->ipr_cmd_pool = NULL;
9533}
9534
9535/**
9536 * ipr_free_mem - Frees memory allocated for an adapter
9537 * @ioa_cfg: ioa cfg struct
9538 *
9539 * Return value:
9540 * nothing
9541 **/
9542static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9543{
9544 int i;
9545
9546 kfree(ioa_cfg->res_entries);
Anton Blanchardd73341b2014-10-30 17:27:08 -05009547 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9548 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009549 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009550
9551 for (i = 0; i < ioa_cfg->hrrq_num; i++)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009552 dma_free_coherent(&ioa_cfg->pdev->dev,
9553 sizeof(u32) * ioa_cfg->hrrq[i].size,
9554 ioa_cfg->hrrq[i].host_rrq,
9555 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009556
Anton Blanchardd73341b2014-10-30 17:27:08 -05009557 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9558 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009559
Brian Kingafc3f832016-08-24 12:56:51 -05009560 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009561 dma_free_coherent(&ioa_cfg->pdev->dev,
9562 sizeof(struct ipr_hostrcb),
9563 ioa_cfg->hostrcb[i],
9564 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009565 }
9566
9567 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009568 kfree(ioa_cfg->trace);
9569}
9570
9571/**
Brian King2796ca52015-03-26 11:23:52 -05009572 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9573 * @ioa_cfg: ipr cfg struct
9574 *
9575 * This function frees all allocated IRQs for the
9576 * specified adapter.
9577 *
9578 * Return value:
9579 * none
9580 **/
9581static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9582{
9583 struct pci_dev *pdev = ioa_cfg->pdev;
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009584 int i;
Brian King2796ca52015-03-26 11:23:52 -05009585
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009586 for (i = 0; i < ioa_cfg->nvectors; i++)
9587 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9588 pci_free_irq_vectors(pdev);
Brian King2796ca52015-03-26 11:23:52 -05009589}
9590
9591/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009592 * ipr_free_all_resources - Free all allocated resources for an adapter.
 9593 * @ioa_cfg: ioa config struct
9594 *
9595 * This function frees all allocated resources for the
9596 * specified adapter.
9597 *
9598 * Return value:
9599 * none
9600 **/
9601static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9602{
9603 struct pci_dev *pdev = ioa_cfg->pdev;
9604
9605 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05009606 ipr_free_irqs(ioa_cfg);
9607 if (ioa_cfg->reset_work_q)
9608 destroy_workqueue(ioa_cfg->reset_work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009609 iounmap(ioa_cfg->hdw_dma_regs);
9610 pci_release_regions(pdev);
9611 ipr_free_mem(ioa_cfg);
9612 scsi_host_put(ioa_cfg->host);
9613 pci_disable_device(pdev);
9614 LEAVE;
9615}
9616
9617/**
9618 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9619 * @ioa_cfg: ioa config struct
9620 *
9621 * Return value:
9622 * 0 on success / -ENOMEM on allocation failure
9623 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009624static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009625{
9626 struct ipr_cmnd *ipr_cmd;
9627 struct ipr_ioarcb *ioarcb;
9628 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009629 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009630
Anton Blanchardd73341b2014-10-30 17:27:08 -05009631 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009632 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009633
9634 if (!ioa_cfg->ipr_cmd_pool)
9635 return -ENOMEM;
9636
Brian King89aad422012-03-14 21:20:10 -05009637 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9638 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9639
9640 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9641 ipr_free_cmd_blks(ioa_cfg);
9642 return -ENOMEM;
9643 }
9644
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009645 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9646 if (ioa_cfg->hrrq_num > 1) {
9647 if (i == 0) {
9648 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9649 ioa_cfg->hrrq[i].min_cmd_id = 0;
9650 ioa_cfg->hrrq[i].max_cmd_id =
9651 (entries_each_hrrq - 1);
9652 } else {
9653 entries_each_hrrq =
9654 IPR_NUM_BASE_CMD_BLKS/
9655 (ioa_cfg->hrrq_num - 1);
9656 ioa_cfg->hrrq[i].min_cmd_id =
9657 IPR_NUM_INTERNAL_CMD_BLKS +
9658 (i - 1) * entries_each_hrrq;
9659 ioa_cfg->hrrq[i].max_cmd_id =
9660 (IPR_NUM_INTERNAL_CMD_BLKS +
9661 i * entries_each_hrrq - 1);
9662 }
9663 } else {
9664 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9665 ioa_cfg->hrrq[i].min_cmd_id = 0;
9666 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9667 }
9668 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9669 }
9670
9671 BUG_ON(ioa_cfg->hrrq_num == 0);
9672
9673 i = IPR_NUM_CMD_BLKS -
9674 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9675 if (i > 0) {
9676 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9677 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9678 }
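	/*
	 * Worked example (values purely illustrative, not the driver's real
	 * constants): assume hrrq_num = 3, IPR_NUM_INTERNAL_CMD_BLKS = 5,
	 * IPR_NUM_BASE_CMD_BLKS = 95 and IPR_NUM_CMD_BLKS as their sum (100).
	 * Then hrrq[0] owns command ids 0-4 for internal use, hrrq[1] owns
	 * 5-51 and hrrq[2] owns 52-98; the remainder adjustment just above
	 * grows the last hrrq to 52-99 so that every command id is covered.
	 */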
9679
Linus Torvalds1da177e2005-04-16 15:20:36 -07009680 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009681 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009682
9683 if (!ipr_cmd) {
9684 ipr_free_cmd_blks(ioa_cfg);
9685 return -ENOMEM;
9686 }
9687
9688 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9689 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9690 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9691
9692 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08009693 ipr_cmd->dma_addr = dma_addr;
9694 if (ioa_cfg->sis64)
9695 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9696 else
9697 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9698
Linus Torvalds1da177e2005-04-16 15:20:36 -07009699 ioarcb->host_response_handle = cpu_to_be32(i << 2);
Wayne Boyera32c0552010-02-19 13:23:36 -08009700 if (ioa_cfg->sis64) {
9701 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9702 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9703 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009704 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08009705 } else {
9706 ioarcb->write_ioadl_addr =
9707 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9708 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9709 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009710 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08009711 }
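		/*
		 * The addresses programmed above are bus addresses of the
		 * IOADL and IOASA embedded in this same ipr_cmnd allocation,
		 * hence the dma_addr + offsetof() arithmetic; the adapter
		 * DMAs directly into those embedded structures.
		 */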
Linus Torvalds1da177e2005-04-16 15:20:36 -07009712 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9713 ipr_cmd->cmd_index = i;
9714 ipr_cmd->ioa_cfg = ioa_cfg;
9715 ipr_cmd->sense_buffer_dma = dma_addr +
9716 offsetof(struct ipr_cmnd, sense_buffer);
9717
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009718 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9719 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9720 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9721 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9722 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009723 }
9724
9725 return 0;
9726}
9727
9728/**
9729 * ipr_alloc_mem - Allocate memory for an adapter
9730 * @ioa_cfg: ioa config struct
9731 *
9732 * Return value:
9733 * 0 on success / non-zero for error
9734 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009735static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009736{
9737 struct pci_dev *pdev = ioa_cfg->pdev;
9738 int i, rc = -ENOMEM;
9739
9740 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009741 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009742 ioa_cfg->max_devs_supported, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009743
9744 if (!ioa_cfg->res_entries)
9745 goto out;
9746
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009747 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009748 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009749 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9750 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009751
Anton Blanchardd73341b2014-10-30 17:27:08 -05009752 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9753 sizeof(struct ipr_misc_cbs),
9754 &ioa_cfg->vpd_cbs_dma,
9755 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009756
9757 if (!ioa_cfg->vpd_cbs)
9758 goto out_free_res_entries;
9759
9760 if (ipr_alloc_cmd_blks(ioa_cfg))
9761 goto out_free_vpd_cbs;
9762
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009763 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009764 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009765 sizeof(u32) * ioa_cfg->hrrq[i].size,
Anton Blanchardd73341b2014-10-30 17:27:08 -05009766 &ioa_cfg->hrrq[i].host_rrq_dma,
9767 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009768
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009769 if (!ioa_cfg->hrrq[i].host_rrq) {
9770 while (--i > 0)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009771 dma_free_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009772 sizeof(u32) * ioa_cfg->hrrq[i].size,
9773 ioa_cfg->hrrq[i].host_rrq,
9774 ioa_cfg->hrrq[i].host_rrq_dma);
9775 goto out_ipr_free_cmd_blocks;
9776 }
9777 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9778 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009779
Anton Blanchardd73341b2014-10-30 17:27:08 -05009780 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9781 ioa_cfg->cfg_table_size,
9782 &ioa_cfg->cfg_table_dma,
9783 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009784
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009785 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009786 goto out_free_host_rrq;
9787
Brian Kingafc3f832016-08-24 12:56:51 -05009788 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009789 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9790 sizeof(struct ipr_hostrcb),
9791 &ioa_cfg->hostrcb_dma[i],
9792 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009793
9794 if (!ioa_cfg->hostrcb[i])
9795 goto out_free_hostrcb_dma;
9796
9797 ioa_cfg->hostrcb[i]->hostrcb_dma =
9798 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06009799 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009800 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9801 }
9802
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009803 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07009804 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9805
9806 if (!ioa_cfg->trace)
9807 goto out_free_hostrcb_dma;
9808
Linus Torvalds1da177e2005-04-16 15:20:36 -07009809 rc = 0;
9810out:
9811 LEAVE;
9812 return rc;
9813
9814out_free_hostrcb_dma:
9815 while (i-- > 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009816 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9817 ioa_cfg->hostrcb[i],
9818 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009819 }
Anton Blanchardd73341b2014-10-30 17:27:08 -05009820 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9821 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009822out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009823 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009824 dma_free_coherent(&pdev->dev,
9825 sizeof(u32) * ioa_cfg->hrrq[i].size,
9826 ioa_cfg->hrrq[i].host_rrq,
9827 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009828 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009829out_ipr_free_cmd_blocks:
9830 ipr_free_cmd_blks(ioa_cfg);
9831out_free_vpd_cbs:
Anton Blanchardd73341b2014-10-30 17:27:08 -05009832 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9833 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009834out_free_res_entries:
9835 kfree(ioa_cfg->res_entries);
9836 goto out;
9837}
9838
9839/**
9840 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9841 * @ioa_cfg: ioa config struct
9842 *
9843 * Return value:
9844 * none
9845 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009846static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009847{
9848 int i;
9849
9850 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9851 ioa_cfg->bus_attr[i].bus = i;
9852 ioa_cfg->bus_attr[i].qas_enabled = 0;
9853 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9854 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9855 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9856 else
9857 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9858 }
9859}
9860
9861/**
Brian King6270e592014-01-21 12:16:41 -06009862 * ipr_init_regs - Initialize IOA registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07009863 * @ioa_cfg: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07009864 *
9865 * Return value:
Brian King6270e592014-01-21 12:16:41 -06009866 * none
Linus Torvalds1da177e2005-04-16 15:20:36 -07009867 **/
Brian King6270e592014-01-21 12:16:41 -06009868static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009869{
9870 const struct ipr_interrupt_offsets *p;
9871 struct ipr_interrupts *t;
9872 void __iomem *base;
9873
Linus Torvalds1da177e2005-04-16 15:20:36 -07009874 p = &ioa_cfg->chip_cfg->regs;
9875 t = &ioa_cfg->regs;
9876 base = ioa_cfg->hdw_dma_regs;
9877
9878 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9879 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009880 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009881 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009882 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009883 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009884 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009885 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009886 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009887 t->ioarrin_reg = base + p->ioarrin_reg;
9888 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009889 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009890 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009891 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009892 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009893 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009894
9895 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009896 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009897 t->dump_addr_reg = base + p->dump_addr_reg;
9898 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009899 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009900 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009901}
9902
9903/**
Brian King6270e592014-01-21 12:16:41 -06009904 * ipr_init_ioa_cfg - Initialize IOA config struct
9905 * @ioa_cfg: ioa config struct
9906 * @host: scsi host struct
9907 * @pdev: PCI dev struct
9908 *
9909 * Return value:
9910 * none
9911 **/
9912static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9913 struct Scsi_Host *host, struct pci_dev *pdev)
9914{
9915 int i;
9916
9917 ioa_cfg->host = host;
9918 ioa_cfg->pdev = pdev;
9919 ioa_cfg->log_level = ipr_log_level;
9920 ioa_cfg->doorbell = IPR_DOORBELL;
9921 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9922 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9923 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9924 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9925 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9926 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9927
9928 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9929 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
Brian Kingafc3f832016-08-24 12:56:51 -05009930 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
Brian King6270e592014-01-21 12:16:41 -06009931 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9932 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9933 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9934 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9935 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9936 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9937 ioa_cfg->sdt_state = INACTIVE;
9938
9939 ipr_initialize_bus_attr(ioa_cfg);
9940 ioa_cfg->max_devs_supported = ipr_max_devs;
9941
9942 if (ioa_cfg->sis64) {
9943 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9944 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9945 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9946 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9947 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9948 + ((sizeof(struct ipr_config_table_entry64)
9949 * ioa_cfg->max_devs_supported)));
9950 } else {
9951 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9952 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9953 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9954 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9955 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9956 + ((sizeof(struct ipr_config_table_entry)
9957 * ioa_cfg->max_devs_supported)));
9958 }
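	/*
	 * Illustration (numbers hypothetical): on a SIS-64 adapter with
	 * max_devs_supported clamped to 1024, cfg_table_size covers one
	 * ipr_config_table_hdr64 plus 1024 ipr_config_table_entry64
	 * records; the SIS-32 branch uses the smaller 32-bit structures.
	 */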
9959
Brian Kingf688f962014-12-02 12:47:37 -06009960 host->max_channel = IPR_VSET_BUS;
Brian King6270e592014-01-21 12:16:41 -06009961 host->unique_id = host->host_no;
9962 host->max_cmd_len = IPR_MAX_CDB_LEN;
9963 host->can_queue = ioa_cfg->max_cmds;
9964 pci_set_drvdata(pdev, ioa_cfg);
9965
9966 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9967 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9968 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9969 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9970 if (i == 0)
9971 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9972 else
9973 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9974 }
9975}
9976
9977/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009978 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009979 * @dev_id: PCI device id struct
9980 *
9981 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009982 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009983 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009984static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009985ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009986{
9987 int i;
9988
Linus Torvalds1da177e2005-04-16 15:20:36 -07009989 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9990 if (ipr_chip[i].vendor == dev_id->vendor &&
9991 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009992 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07009993 return NULL;
9994}
9995
Brian King6270e592014-01-21 12:16:41 -06009996/**
9997 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9998 * during probe time
9999 * @ioa_cfg: ioa config struct
10000 *
10001 * Return value:
10002 * None
10003 **/
10004static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10005{
10006 struct pci_dev *pdev = ioa_cfg->pdev;
10007
10008 if (pci_channel_offline(pdev)) {
10009 wait_event_timeout(ioa_cfg->eeh_wait_q,
10010 !pci_channel_offline(pdev),
10011 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10012 pci_restore_state(pdev);
10013 }
10014}
10015
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010016static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10017{
10018 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10019
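	/*
	 * Example of the resulting names (host number hypothetical): for
	 * host3 with nvectors = 4 the descriptions become "host3-0",
	 * "host3-1", "host3-2" and "host3-3", one per MSI/MSI-X vector.
	 */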
10020 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10021 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10022 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10023 ioa_cfg->vectors_info[vec_idx].
10024 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10025 }
10026}
10027
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010028static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10029 struct pci_dev *pdev)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010030{
10031 int i, rc;
10032
10033 for (i = 1; i < ioa_cfg->nvectors; i++) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010034 rc = request_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010035 ipr_isr_mhrrq,
10036 0,
10037 ioa_cfg->vectors_info[i].desc,
10038 &ioa_cfg->hrrq[i]);
10039 if (rc) {
10040 while (--i >= 0)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010041 free_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010042 &ioa_cfg->hrrq[i]);
10043 return rc;
10044 }
10045 }
10046 return 0;
10047}
10048
Linus Torvalds1da177e2005-04-16 15:20:36 -070010049/**
Wayne Boyer95fecd92009-06-16 15:13:28 -070010050 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10051 * @irq:	interrupt number
 * @devp:	pointer to ioa config struct
10052 *
10053 * Description: Simply sets the msi_received flag to 1 indicating that
10054 * Message Signaled Interrupts are supported.
10055 *
10056 * Return value:
10057 * 	IRQ_HANDLED
10058 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010059static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010060{
10061 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10062 unsigned long lock_flags = 0;
10063 irqreturn_t rc = IRQ_HANDLED;
10064
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010065 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010066 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10067
10068 ioa_cfg->msi_received = 1;
10069 wake_up(&ioa_cfg->msi_wait_q);
10070
10071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10072 return rc;
10073}
10074
10075/**
10076 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10077 * @ioa_cfg:	ioa config struct
 * @pdev:		PCI device struct
10078 *
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010079 * Description: This routine sets up and initiates a test interrupt to determine
Wayne Boyer95fecd92009-06-16 15:13:28 -070010080 * if the interrupt is received via the ipr_test_intr() service routine.
10081 * If the test fails, the driver will fall back to LSI.
10082 *
10083 * Return value:
10084 * 0 on success / non-zero on failure
10085 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010086static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010087{
10088 int rc;
10089 volatile u32 int_reg;
10090 unsigned long lock_flags = 0;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010091 int irq = pci_irq_vector(pdev, 0);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010092
10093 ENTER;
10094
10095 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10096 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10097 ioa_cfg->msi_received = 0;
10098 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -080010099 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010100 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10101 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10102
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010103 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010104 if (rc) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010105 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010106 return rc;
10107 } else if (ipr_debug)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010108 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010109
Wayne Boyer214777b2010-02-19 13:24:26 -080010110 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010111 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10112 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010113 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010114 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10115
Wayne Boyer95fecd92009-06-16 15:13:28 -070010116 if (!ioa_cfg->msi_received) {
10117 /* MSI test failed */
10118 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10119 rc = -EOPNOTSUPP;
10120 } else if (ipr_debug)
10121 dev_info(&pdev->dev, "MSI test succeeded.\n");
10122
10123 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10124
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010125 free_irq(irq, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010126
10127 LEAVE;
10128
10129 return rc;
10130}
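#if 0
/*
 * Illustrative sketch only, not part of the original driver: a condensed
 * view of the vector setup that ipr_probe_ioa() performs further down.
 * It reuses the module parameter ipr_number_of_msix and the nvectors and
 * clear_isr fields exactly as they appear elsewhere in this file, and it
 * simplifies error handling to the -EOPNOTSUPP fallback case.
 */
static int ipr_example_setup_vectors(struct ipr_ioa_cfg *ioa_cfg,
				     struct pci_dev *pdev)
{
	int rc;

	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix,
				   PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (rc < 0)
		return rc;
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;

	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			/* Test interrupt never arrived: drop to one legacy vector */
			pci_free_irq_vectors(pdev);
			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
		} else if (rc) {
			return rc;
		}
	}
	return 0;
}
#endif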
10131
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010132/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -070010133 * @pdev: PCI device struct
10134 * @dev_id: PCI device id struct
10135 *
10136 * Return value:
10137 * 0 on success / non-zero on failure
10138 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010139static int ipr_probe_ioa(struct pci_dev *pdev,
10140 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010141{
10142 struct ipr_ioa_cfg *ioa_cfg;
10143 struct Scsi_Host *host;
10144 unsigned long ipr_regs_pci;
10145 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -070010146 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -050010147 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010148 unsigned long lock_flags, driver_lock_flags;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010149 unsigned int irq_flag;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010150
10151 ENTER;
10152
Linus Torvalds1da177e2005-04-16 15:20:36 -070010153 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010154 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10155
10156 if (!host) {
10157 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10158 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010159 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010160 }
10161
10162 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10163 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d12012-07-09 21:06:08 -070010164 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010165
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010166 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010167
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010168 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010169 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10170 dev_id->vendor, dev_id->device);
10171 goto out_scsi_host_put;
10172 }
10173
Wayne Boyera32c0552010-02-19 13:23:36 -080010174 /* set SIS 32 or SIS 64 */
10175 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010176 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -050010177 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -050010178 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010179
Brian King5469cb52007-03-29 12:42:40 -050010180 if (ipr_transop_timeout)
10181 ioa_cfg->transop_timeout = ipr_transop_timeout;
10182 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10183 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10184 else
10185 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10186
Auke Kok44c10132007-06-08 15:46:36 -070010187 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -050010188
Brian King6270e592014-01-21 12:16:41 -060010189 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10190
Linus Torvalds1da177e2005-04-16 15:20:36 -070010191 ipr_regs_pci = pci_resource_start(pdev, 0);
10192
10193 rc = pci_request_regions(pdev, IPR_NAME);
10194 if (rc < 0) {
10195 dev_err(&pdev->dev,
10196 "Couldn't register memory range of registers\n");
10197 goto out_scsi_host_put;
10198 }
10199
Brian King6270e592014-01-21 12:16:41 -060010200 rc = pci_enable_device(pdev);
10201
10202 if (rc || pci_channel_offline(pdev)) {
10203 if (pci_channel_offline(pdev)) {
10204 ipr_wait_for_pci_err_recovery(ioa_cfg);
10205 rc = pci_enable_device(pdev);
10206 }
10207
10208 if (rc) {
10209 dev_err(&pdev->dev, "Cannot enable adapter\n");
10210 ipr_wait_for_pci_err_recovery(ioa_cfg);
10211 goto out_release_regions;
10212 }
10213 }
10214
Arjan van de Ven25729a72008-09-28 16:18:02 -070010215 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010216
10217 if (!ipr_regs) {
10218 dev_err(&pdev->dev,
10219 "Couldn't map memory range of registers\n");
10220 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010221 goto out_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010222 }
10223
10224 ioa_cfg->hdw_dma_regs = ipr_regs;
10225 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10226 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10227
Brian King6270e592014-01-21 12:16:41 -060010228 ipr_init_regs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010229
Wayne Boyera32c0552010-02-19 13:23:36 -080010230 if (ioa_cfg->sis64) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010231 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Wayne Boyera32c0552010-02-19 13:23:36 -080010232 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010233 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10234 rc = dma_set_mask_and_coherent(&pdev->dev,
10235 DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010236 }
Wayne Boyera32c0552010-02-19 13:23:36 -080010237 } else
Anton Blanchard869404c2014-10-30 17:27:09 -050010238 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010239
Linus Torvalds1da177e2005-04-16 15:20:36 -070010240 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010241 dev_err(&pdev->dev, "Failed to set DMA mask\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070010242 goto cleanup_nomem;
10243 }
10244
10245 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10246 ioa_cfg->chip_cfg->cache_line_size);
10247
10248 if (rc != PCIBIOS_SUCCESSFUL) {
10249 dev_err(&pdev->dev, "Write of cache line size failed\n");
Brian King6270e592014-01-21 12:16:41 -060010250 ipr_wait_for_pci_err_recovery(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010251 rc = -EIO;
10252 goto cleanup_nomem;
10253 }
10254
Brian King6270e592014-01-21 12:16:41 -060010255 /* Issue MMIO read to ensure card is not in EEH */
10256 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10257 ipr_wait_for_pci_err_recovery(ioa_cfg);
10258
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010259 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10260 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10261 IPR_MAX_MSIX_VECTORS);
10262 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10263 }
10264
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010265 irq_flag = PCI_IRQ_LEGACY;
10266 if (ioa_cfg->ipr_chip->has_msi)
10267 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10268 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10269 if (rc < 0) {
10270 ipr_wait_for_pci_err_recovery(ioa_cfg);
10271 goto cleanup_nomem;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010272 }
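	/*
	 * On success pci_alloc_irq_vectors() returns how many MSI/MSI-X
	 * vectors were actually granted (from 1 up to ipr_number_of_msix).
	 * If neither MSI nor MSI-X could be enabled the adapter is on a
	 * shared legacy INTx line, so the ISR must clear the interrupt
	 * itself, which is what clear_isr below requests.
	 */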
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010273 ioa_cfg->nvectors = rc;
10274
10275 if (!pdev->msi_enabled && !pdev->msix_enabled)
10276 ioa_cfg->clear_isr = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010277
Brian King6270e592014-01-21 12:16:41 -060010278 pci_set_master(pdev);
10279
10280 if (pci_channel_offline(pdev)) {
10281 ipr_wait_for_pci_err_recovery(ioa_cfg);
10282 pci_set_master(pdev);
10283 if (pci_channel_offline(pdev)) {
10284 rc = -EIO;
10285 goto out_msi_disable;
10286 }
10287 }
10288
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010289 if (pdev->msi_enabled || pdev->msix_enabled) {
Wayne Boyer95fecd92009-06-16 15:13:28 -070010290 rc = ipr_test_msi(ioa_cfg, pdev);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010291 switch (rc) {
10292 case 0:
10293 dev_info(&pdev->dev,
10294 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10295 pdev->msix_enabled ? "-X" : "");
10296 break;
10297 case -EOPNOTSUPP:
Brian King6270e592014-01-21 12:16:41 -060010298 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010299 pci_free_irq_vectors(pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010300
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010301 ioa_cfg->nvectors = 1;
Benjamin Herrenschmidt9dadfb92016-11-30 15:28:55 -060010302 ioa_cfg->clear_isr = 1;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010303 break;
10304 default:
Wayne Boyer95fecd92009-06-16 15:13:28 -070010305 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010306 }
10307 }
10308
10309 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10310 (unsigned int)num_online_cpus(),
10311 (unsigned int)IPR_MAX_HRRQ_NUM);
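	/*
	 * Example (numbers hypothetical): 8 granted vectors, 16 online CPUs
	 * and an IPR_MAX_HRRQ_NUM of 16 give hrrq_num = 8, i.e. one host
	 * RRQ per interrupt vector actually allocated.
	 */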
Wayne Boyer95fecd92009-06-16 15:13:28 -070010312
Linus Torvalds1da177e2005-04-16 15:20:36 -070010313 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010314 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010315
10316 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010317 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010318
10319 rc = ipr_alloc_mem(ioa_cfg);
10320 if (rc < 0) {
10321 dev_err(&pdev->dev,
10322 "Couldn't allocate enough memory for device driver!\n");
Julia Lawallf170c682011-07-11 14:08:25 -070010323 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010324 }
10325
Brian King6270e592014-01-21 12:16:41 -060010326 /* Save away PCI config space for use following IOA reset */
10327 rc = pci_save_state(pdev);
10328
10329 if (rc != PCIBIOS_SUCCESSFUL) {
10330 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10331 rc = -EIO;
10332 goto cleanup_nolog;
10333 }
10334
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010335 /*
10336 * If HRRQ updated interrupt is not masked, or reset alert is set,
10337 * the card is in an unknown state and needs a hard reset
10338 */
Wayne Boyer214777b2010-02-19 13:24:26 -080010339 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10340 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10341 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010342 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10343 ioa_cfg->needs_hard_reset = 1;
Anton Blanchard5d7c20b2011-08-01 19:43:45 +100010344 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
Brian King473b1e82007-05-02 10:44:11 -050010345 ioa_cfg->needs_hard_reset = 1;
10346 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10347 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010348
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010349 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010350 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010352
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010353 if (pdev->msi_enabled || pdev->msix_enabled) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010354 name_msi_vectors(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010355 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010356 ioa_cfg->vectors_info[0].desc,
10357 &ioa_cfg->hrrq[0]);
10358 if (!rc)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010359 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010360 } else {
10361 rc = request_irq(pdev->irq, ipr_isr,
10362 IRQF_SHARED,
10363 IPR_NAME, &ioa_cfg->hrrq[0]);
10364 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010365 if (rc) {
10366 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10367 pdev->irq, rc);
10368 goto cleanup_nolog;
10369 }
10370
Brian King463fc692007-05-07 17:09:05 -050010371 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10372 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10373 ioa_cfg->needs_warm_reset = 1;
10374 ioa_cfg->reset = ipr_reset_slot_reset;
Brian King2796ca52015-03-26 11:23:52 -050010375
10376 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10377 WQ_MEM_RECLAIM, host->host_no);
10378
10379 if (!ioa_cfg->reset_work_q) {
10380 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
Wei Yongjunc8e18ac2016-07-29 16:00:45 +000010381 rc = -ENOMEM;
Brian King2796ca52015-03-26 11:23:52 -050010382 goto out_free_irq;
10383 }
Brian King463fc692007-05-07 17:09:05 -050010384 } else
10385 ioa_cfg->reset = ipr_reset_start_bist;
10386
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010387 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010388 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010389 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010390
10391 LEAVE;
10392out:
10393 return rc;
10394
Brian King2796ca52015-03-26 11:23:52 -050010395out_free_irq:
10396 ipr_free_irqs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010397cleanup_nolog:
10398 ipr_free_mem(ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010399out_msi_disable:
Brian King6270e592014-01-21 12:16:41 -060010400 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010401 pci_free_irq_vectors(pdev);
Julia Lawallf170c682011-07-11 14:08:25 -070010402cleanup_nomem:
10403 iounmap(ipr_regs);
Brian King6270e592014-01-21 12:16:41 -060010404out_disable:
10405 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010406out_release_regions:
10407 pci_release_regions(pdev);
10408out_scsi_host_put:
10409 scsi_host_put(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010410 goto out;
10411}
10412
10413/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010414 * ipr_initiate_ioa_bringdown - Bring down an adapter
10415 * @ioa_cfg: ioa config struct
10416 * @shutdown_type: shutdown type
10417 *
10418 * Description: This function will initiate bringing down the adapter.
10419 * This consists of issuing an IOA shutdown to the adapter
10420 * to flush the cache, and running BIST.
10421 * If the caller needs to wait on the completion of the reset,
10422 * the caller must sleep on the reset_wait_q.
10423 *
10424 * Return value:
10425 * none
10426 **/
10427static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10428 enum ipr_shutdown_type shutdown_type)
10429{
10430 ENTER;
10431 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10432 ioa_cfg->sdt_state = ABORT_DUMP;
10433 ioa_cfg->reset_retries = 0;
10434 ioa_cfg->in_ioa_bringdown = 1;
10435 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10436 LEAVE;
10437}
10438
10439/**
10440 * __ipr_remove - Remove a single adapter
10441 * @pdev: pci device struct
10442 *
10443 * Adapter hot plug remove entry point.
10444 *
10445 * Return value:
10446 * none
10447 **/
10448static void __ipr_remove(struct pci_dev *pdev)
10449{
10450 unsigned long host_lock_flags = 0;
10451 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Brian Kingbfae7822013-01-30 23:45:08 -060010452 int i;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010453 unsigned long driver_lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010454 ENTER;
10455
10456 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010457 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010458 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10459 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10460 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10461 }
10462
Brian Kingbfae7822013-01-30 23:45:08 -060010463 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10464 spin_lock(&ioa_cfg->hrrq[i]._lock);
10465 ioa_cfg->hrrq[i].removing_ioa = 1;
10466 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10467 }
10468 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -070010469 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10470
10471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10472 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Tejun Heo43829732012-08-20 14:51:24 -070010473 flush_work(&ioa_cfg->work_q);
Brian King2796ca52015-03-26 11:23:52 -050010474 if (ioa_cfg->reset_work_q)
10475 flush_workqueue(ioa_cfg->reset_work_q);
wenxiong@linux.vnet.ibm.com9077a942013-03-14 13:52:24 -050010476 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010477 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10478
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010479 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010480 list_del(&ioa_cfg->queue);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010481 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010482
10483 if (ioa_cfg->sdt_state == ABORT_DUMP)
10484 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10485 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10486
10487 ipr_free_all_resources(ioa_cfg);
10488
10489 LEAVE;
10490}
10491
10492/**
10493 * ipr_remove - IOA hot plug remove entry point
10494 * @pdev: pci device struct
10495 *
10496 * Adapter hot plug remove entry point.
10497 *
10498 * Return value:
10499 * none
10500 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010501static void ipr_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010502{
10503 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10504
10505 ENTER;
10506
Tony Jonesee959b02008-02-22 00:13:36 +010010507 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010508 &ipr_trace_attr);
Tony Jonesee959b02008-02-22 00:13:36 +010010509 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010510 &ipr_dump_attr);
Brian Kingafc3f832016-08-24 12:56:51 -050010511 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10512 &ipr_ioa_async_err_log);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010513 scsi_remove_host(ioa_cfg->host);
10514
10515 __ipr_remove(pdev);
10516
10517 LEAVE;
10518}
10519
10520/**
10521 * ipr_probe - Adapter hot plug add entry point
10522 *
10523 * @pdev:	PCI device struct
 * @dev_id:	PCI device ID struct
 *
10524 * 0 on success / non-zero on failure
10525 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010526static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010527{
10528 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingb195d5e2016-07-15 14:48:03 -050010529 unsigned long flags;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010530 int rc, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010531
10532 rc = ipr_probe_ioa(pdev, dev_id);
10533
10534 if (rc)
10535 return rc;
10536
10537 ioa_cfg = pci_get_drvdata(pdev);
10538 rc = ipr_probe_ioa_part2(ioa_cfg);
10539
10540 if (rc) {
10541 __ipr_remove(pdev);
10542 return rc;
10543 }
10544
10545 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10546
10547 if (rc) {
10548 __ipr_remove(pdev);
10549 return rc;
10550 }
10551
Tony Jonesee959b02008-02-22 00:13:36 +010010552 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010553 &ipr_trace_attr);
10554
10555 if (rc) {
10556 scsi_remove_host(ioa_cfg->host);
10557 __ipr_remove(pdev);
10558 return rc;
10559 }
10560
Brian Kingafc3f832016-08-24 12:56:51 -050010561 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10562 &ipr_ioa_async_err_log);
10563
10564 if (rc) {
10565 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10566 &ipr_dump_attr);
10567 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10568 &ipr_trace_attr);
10569 scsi_remove_host(ioa_cfg->host);
10570 __ipr_remove(pdev);
10571 return rc;
10572 }
10573
Tony Jonesee959b02008-02-22 00:13:36 +010010574 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010575 &ipr_dump_attr);
10576
10577 if (rc) {
Brian Kingafc3f832016-08-24 12:56:51 -050010578 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10579 &ipr_ioa_async_err_log);
Tony Jonesee959b02008-02-22 00:13:36 +010010580 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010581 &ipr_trace_attr);
10582 scsi_remove_host(ioa_cfg->host);
10583 __ipr_remove(pdev);
10584 return rc;
10585 }
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010586 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10587 ioa_cfg->scan_enabled = 1;
10588 schedule_work(&ioa_cfg->work_q);
10589 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010590
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010591 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10592
Jens Axboe89f8b332014-03-13 09:38:42 -060010593 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010594 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010595 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010596 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010597 }
10598 }
10599
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010600 scsi_scan_host(ioa_cfg->host);
10601
Linus Torvalds1da177e2005-04-16 15:20:36 -070010602 return 0;
10603}
10604
10605/**
10606 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010607 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -070010608 *
10609 * This function is invoked upon system shutdown/reboot. It will issue
10610 * a shutdown command to the adapter to flush the write cache.
10611 *
10612 * Return value:
10613 * none
10614 **/
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010615static void ipr_shutdown(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010616{
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010617 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010618 unsigned long lock_flags = 0;
Brian King4fdd7c72015-03-26 11:23:50 -050010619 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010620 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010621
10622 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Jens Axboe89f8b332014-03-13 09:38:42 -060010623 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010624 ioa_cfg->iopoll_weight = 0;
10625 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010626 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010627 }
10628
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010629 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010630 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10631 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10632 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10633 }
10634
Brian King4fdd7c72015-03-26 11:23:50 -050010635 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10636 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10637
10638 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010639 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10640 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Brian King4fdd7c72015-03-26 11:23:50 -050010641 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
Brian King2796ca52015-03-26 11:23:52 -050010642 ipr_free_irqs(ioa_cfg);
Brian King4fdd7c72015-03-26 11:23:50 -050010643 pci_disable_device(ioa_cfg->pdev);
10644 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010645}
10646
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010647static struct pci_device_id ipr_pci_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010648 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010649 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010650 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010651 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010652 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010653 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010654 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010655 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010656 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010657 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010658 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010659 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010660 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010661 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010662 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King5469cb52007-03-29 12:42:40 -050010663 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10664 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010665 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010666 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010667 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010668 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10669 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010670 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010671 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10672 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010673 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010674 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010675 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010676 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10677 IPR_USE_LONG_TRANSOP_TIMEOUT},
Brian King60e74862006-11-21 10:28:10 -060010678 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010679 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10680 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010681 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King22d2e402007-04-26 16:00:13 -050010682 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10683 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King185eb312007-03-29 12:42:53 -050010684 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King185eb312007-03-29 12:42:53 -050010685 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10686 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Wayne Boyerb0f56d32010-06-24 13:34:14 -070010687 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10688 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King5469cb52007-03-29 12:42:40 -050010689 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
Brian King463fc692007-05-07 17:09:05 -050010690 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010691 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
Brian King6d84c942007-01-23 11:25:23 -060010692 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010693 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King6d84c942007-01-23 11:25:23 -060010694 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010695 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010696 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10697 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010698 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010699 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10700 IPR_USE_LONG_TRANSOP_TIMEOUT },
Wayne Boyerd7b46272010-02-19 13:24:38 -080010701 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10702 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10703 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10704 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10705 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10706 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
Wayne Boyer32622bd2010-10-18 20:24:34 -070010707 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -060010708 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10709 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer5a918352011-10-27 11:58:21 -070010710 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10711 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer32622bd2010-10-18 20:24:34 -070010712 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010713 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010714 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010715 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010716 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010717 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010718 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010719 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10720 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10721 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010722 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -060010723 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10724 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10725 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10726 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10727 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10728 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10729 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10730 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
wenxiong@linux.vnet.ibm.com43c5fda2013-07-10 10:46:27 -050010731 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10732 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10733 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wendy Xiongf94d9962014-01-21 12:16:40 -060010734 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10735 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
wenxiong@linux.vnet.ibm.com43c5fda2013-07-10 10:46:27 -050010736 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10737 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10738 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10739 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10740 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10741 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10742 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10743 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10744 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10745 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10746 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
Wendy Xiong5eeac3e2014-03-12 16:08:52 -050010747 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10749 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10751 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
Wen Xiong00da9ff2016-07-12 16:02:07 -050010753 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10754 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10755 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10756 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010757 { }
10758};
10759MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10760
Stephen Hemmingera55b2d22012-09-07 09:33:16 -070010761static const struct pci_error_handlers ipr_err_handler = {
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010762 .error_detected = ipr_pci_error_detected,
Brian King6270e592014-01-21 12:16:41 -060010763 .mmio_enabled = ipr_pci_mmio_enabled,
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010764 .slot_reset = ipr_pci_slot_reset,
10765};
10766
Linus Torvalds1da177e2005-04-16 15:20:36 -070010767static struct pci_driver ipr_driver = {
10768 .name = IPR_NAME,
10769 .id_table = ipr_pci_table,
10770 .probe = ipr_probe,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010771 .remove = ipr_remove,
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010772 .shutdown = ipr_shutdown,
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010773 .err_handler = &ipr_err_handler,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010774};
10775
10776/**
Wayne Boyerf72919e2010-02-19 13:24:21 -080010777 * ipr_halt_done - Shutdown prepare completion
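 * @ipr_cmd:	ipr command struct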
10778 *
10779 * Return value:
10780 * none
10781 **/
10782static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10783{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010784 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010785}
10786
10787/**
10788 * ipr_halt - Issue shutdown prepare to all adapters
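 * @nb:		Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)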
10789 *
10790 * Return value:
10791 * NOTIFY_OK on success / NOTIFY_DONE on failure
10792 **/
10793static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10794{
10795 struct ipr_cmnd *ipr_cmd;
10796 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010797 unsigned long flags = 0, driver_lock_flags;
Wayne Boyerf72919e2010-02-19 13:24:21 -080010798
10799 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10800 return NOTIFY_DONE;
10801
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010802 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010803
10804 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10805 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
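		/*
		 * Skip adapters that are not currently accepting commands; when
		 * fast reboot is enabled, SIS-64 adapters also skip the shutdown
		 * prepare on a restart.
		 */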
Brian King4fdd7c72015-03-26 11:23:50 -050010806 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10807 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
Wayne Boyerf72919e2010-02-19 13:24:21 -080010808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10809 continue;
10810 }
10811
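		/* Build and issue a prepare-for-normal-shutdown command to the IOA */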
10812 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10813 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10814 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10815 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10816 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10817
10818 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10819 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10820 }
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010821 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010822
10823 return NOTIFY_OK;
10824}
10825
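/* Hooked into the reboot notifier chain so ipr_halt() runs on restart/halt/power-off */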
10826static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
10828};
10829
10830/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010831 * ipr_init - Module entry point
10832 *
10833 * Return value:
10834 * 0 on success / negative value on failure
10835 **/
10836static int __init ipr_init(void)
10837{
10838 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10839 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10840
Wayne Boyerf72919e2010-02-19 13:24:21 -080010841 register_reboot_notifier(&ipr_notifier);
Henrik Kretzschmardcbccbde2006-09-25 16:58:58 -070010842 return pci_register_driver(&ipr_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010843}
10844
10845/**
10846 * ipr_exit - Module unload
10847 *
10848 * Module unload entry point.
10849 *
10850 * Return value:
10851 * none
10852 **/
10853static void __exit ipr_exit(void)
10854{
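	/* Remove the reboot hook, then detach the driver from all registered adapters */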
Wayne Boyerf72919e2010-02-19 13:24:21 -080010855 unregister_reboot_notifier(&ipr_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010856 pci_unregister_driver(&ipr_driver);
10857}
10858
10859module_init(ipr_init);
10860module_exit(ipr_exit);