/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

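/*
 * Bus speed table; the indices correspond to the values accepted by the
 * max_speed module parameter declared below (0 = 80 MB/s, 1 = U160, 2 = U320).
 */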
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

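/*
 * Illustrative example only (not part of the driver): the parameters above
 * can be overridden at module load time, e.g.:
 *
 *	modprobe ipr max_speed=2 fastfail=1 number_of_msix=8
 */
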
/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd: ipr command struct
 * @type: trace type
 * @add_data: additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd: ipr command struct
 * @fast_done: fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq: hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg: ioa config struct
 * @clr_ints: interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg: ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 * @done: done function
 * @timeout_func: timeout function
 * @timeout: timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd: ipr command struct
 * @dma_addr: dma address
 * @len: transfer length
 * @flags: ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

1033/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1035 * @ipr_cmd: ipr command struct
1036 * @timeout_func: function to invoke if command times out
1037 * @timeout: timeout (in jiffies)
1038 *
1039 * Return value:
1040 * none
1041 **/
1042static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1043 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1044 u32 timeout)
1045{
1046 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1047
1048 init_completion(&ipr_cmd->completion);
1049 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1050
1051 spin_unlock_irq(ioa_cfg->host->host_lock);
1052 wait_for_completion(&ipr_cmd->completion);
1053 spin_lock_irq(ioa_cfg->host->host_lock);
1054}
1055
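/**
 * ipr_get_hrrq_index - Select a host RRQ index for a new command.
 * @ioa_cfg:	ioa config struct
 *
 * With a single HRRQ configured this always returns 0. With multiple
 * HRRQs, commands are spread round-robin across queues 1..(hrrq_num - 1),
 * leaving queue 0 (IPR_INIT_HRRQ) for internally generated commands.
 *
 * Return value:
 * 	host RRQ index
 **/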
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001056static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1057{
Brian King3f1c0582015-07-14 11:41:33 -05001058 unsigned int hrrq;
1059
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001060 if (ioa_cfg->hrrq_num == 1)
Brian King3f1c0582015-07-14 11:41:33 -05001061 hrrq = 0;
1062 else {
1063 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1064 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1065 }
1066 return hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001067}
1068
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069/**
1070 * ipr_send_hcam - Send an HCAM to the adapter.
1071 * @ioa_cfg: ioa config struct
1072 * @type: HCAM type
1073 * @hostrcb: hostrcb struct
1074 *
1075 * This function will send a Host Controlled Async command to the adapter.
1076 * If HCAMs are currently not allowed to be issued to the adapter, it will
1077 * place the hostrcb on the free queue.
1078 *
1079 * Return value:
1080 * none
1081 **/
1082static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1083 struct ipr_hostrcb *hostrcb)
1084{
1085 struct ipr_cmnd *ipr_cmd;
1086 struct ipr_ioarcb *ioarcb;
1087
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001088 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001090 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1092
1093 ipr_cmd->u.hostrcb = hostrcb;
1094 ioarcb = &ipr_cmd->ioarcb;
1095
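		/* Build the HCAM CDB: opcode, HCAM type, and the hostrcb buffer
		 * length in CDB bytes 7-8 (big-endian). */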
1096 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1097 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1098 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1099 ioarcb->cmd_pkt.cdb[1] = type;
1100 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1101 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1102
Wayne Boyera32c0552010-02-19 13:23:36 -08001103 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1104 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105
1106 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1107 ipr_cmd->done = ipr_process_ccn;
1108 else
1109 ipr_cmd->done = ipr_process_error;
1110
1111 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1112
Wayne Boyera32c0552010-02-19 13:23:36 -08001113 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114 } else {
1115 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1116 }
1117}
1118
1119/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001120 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001122 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123 *
1124 * Return value:
1125 * none
1126 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001127static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001129 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001130 case IPR_PROTO_SATA:
1131 case IPR_PROTO_SAS_STP:
1132 res->ata_class = ATA_DEV_ATA;
1133 break;
1134 case IPR_PROTO_SATA_ATAPI:
1135 case IPR_PROTO_SAS_STP_ATAPI:
1136 res->ata_class = ATA_DEV_ATAPI;
1137 break;
1138 default:
1139 res->ata_class = ATA_DEV_UNKNOWN;
1140 break;
1141	}
1142}
1143
1144/**
1145 * ipr_init_res_entry - Initialize a resource entry struct.
1146 * @res: resource entry struct
1147 * @cfgtew: config table entry wrapper struct
1148 *
1149 * Return value:
1150 * none
1151 **/
1152static void ipr_init_res_entry(struct ipr_resource_entry *res,
1153 struct ipr_config_table_entry_wrapper *cfgtew)
1154{
1155 int found = 0;
1156 unsigned int proto;
1157 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1158 struct ipr_resource_entry *gscsi_res = NULL;
1159
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001160 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 res->in_erp = 0;
1162 res->add_to_ml = 0;
1163 res->del_from_ml = 0;
1164 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06001165 res->reset_occurred = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001167 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001168
1169 if (ioa_cfg->sis64) {
1170 proto = cfgtew->u.cfgte64->proto;
Brian King359d96e2015-06-11 20:45:20 -05001171 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1172 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001173 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001174 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001175
1176 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1177 sizeof(res->res_path));
1178
1179 res->bus = 0;
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001180 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1181 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001182 res->lun = scsilun_to_int(&res->dev_lun);
1183
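		/*
		 * sis64 resources are addressed by res_path and dev_id rather
		 * than a fixed bus/target, so assign a virtual target id here:
		 * generic SCSI devices sharing a dev_id reuse the same target,
		 * while arrays and volume sets allocate ids on their own
		 * virtual buses.
		 */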
1184 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1185 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1186 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1187 found = 1;
1188 res->target = gscsi_res->target;
1189 break;
1190 }
1191 }
1192 if (!found) {
1193 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1194 ioa_cfg->max_devs_supported);
1195 set_bit(res->target, ioa_cfg->target_ids);
1196 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001197 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1198 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1199 res->target = 0;
1200 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1201 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1202 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1203 ioa_cfg->max_devs_supported);
1204 set_bit(res->target, ioa_cfg->array_ids);
1205 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1206 res->bus = IPR_VSET_VIRTUAL_BUS;
1207 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1208 ioa_cfg->max_devs_supported);
1209 set_bit(res->target, ioa_cfg->vset_ids);
1210 } else {
1211 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1212 ioa_cfg->max_devs_supported);
1213 set_bit(res->target, ioa_cfg->target_ids);
1214 }
1215 } else {
1216 proto = cfgtew->u.cfgte->proto;
1217 res->qmodel = IPR_QUEUEING_MODEL(res);
1218 res->flags = cfgtew->u.cfgte->flags;
1219 if (res->flags & IPR_IS_IOA_RESOURCE)
1220 res->type = IPR_RES_TYPE_IOAFP;
1221 else
1222 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1223
1224 res->bus = cfgtew->u.cfgte->res_addr.bus;
1225 res->target = cfgtew->u.cfgte->res_addr.target;
1226 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001227 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001228 }
1229
1230 ipr_update_ata_class(res, proto);
1231}
1232
1233/**
1234 * ipr_is_same_device - Determine if two devices are the same.
1235 * @res: resource entry struct
1236 * @cfgtew: config table entry wrapper struct
1237 *
1238 * Return value:
1239 * 1 if the devices are the same / 0 otherwise
1240 **/
1241static int ipr_is_same_device(struct ipr_resource_entry *res,
1242 struct ipr_config_table_entry_wrapper *cfgtew)
1243{
1244 if (res->ioa_cfg->sis64) {
1245 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1246 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001247 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001248 sizeof(cfgtew->u.cfgte64->lun))) {
1249 return 1;
1250 }
1251 } else {
1252 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1253 res->target == cfgtew->u.cfgte->res_addr.target &&
1254 res->lun == cfgtew->u.cfgte->res_addr.lun)
1255 return 1;
1256 }
1257
1258 return 0;
1259}
1260
1261/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001262 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001263 * @res_path: resource path
1264 * @buffer: buffer to hold the formatted path
Brian Kingb3b3b402013-01-11 17:43:49 -06001265 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001266 *
1267 * Return value:
1268 * pointer to buffer
1269 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001270static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001271{
1272 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001273 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001274
Wayne Boyer46d74562010-08-11 07:15:17 -07001275 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001276 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1277 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1278 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001279
1280 return buffer;
1281}
1282
1283/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001284 * ipr_format_res_path - Format the resource path for printing.
1285 * @ioa_cfg: ioa config struct
1286 * @res_path: resource path
1287 * @buffer: buffer to hold the formatted path
1288 * @len: length of buffer provided
1289 *
1290 * Return value:
1291 * pointer to buffer
1292 **/
1293static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1294 u8 *res_path, char *buffer, int len)
1295{
1296 char *p = buffer;
1297
1298 *p = '\0';
1299 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1300	__ipr_format_res_path(res_path, p, len - (p - buffer));
1301 return buffer;
1302}
1303
1304/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001305 * ipr_update_res_entry - Update the resource entry.
1306 * @res: resource entry struct
1307 * @cfgtew: config table entry wrapper struct
1308 *
1309 * Return value:
1310 * none
1311 **/
1312static void ipr_update_res_entry(struct ipr_resource_entry *res,
1313 struct ipr_config_table_entry_wrapper *cfgtew)
1314{
1315 char buffer[IPR_MAX_RES_PATH_LENGTH];
1316 unsigned int proto;
1317 int new_path = 0;
1318
1319 if (res->ioa_cfg->sis64) {
Brian King359d96e2015-06-11 20:45:20 -05001320 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1321 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer75576bb2010-07-14 10:50:14 -07001322 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001323
1324 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1325 sizeof(struct ipr_std_inq_data));
1326
1327 res->qmodel = IPR_QUEUEING_MODEL64(res);
1328 proto = cfgtew->u.cfgte64->proto;
1329 res->res_handle = cfgtew->u.cfgte64->res_handle;
1330 res->dev_id = cfgtew->u.cfgte64->dev_id;
1331
1332 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1333 sizeof(res->dev_lun.scsi_lun));
1334
1335 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1336 sizeof(res->res_path))) {
1337 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1338 sizeof(res->res_path));
1339 new_path = 1;
1340 }
1341
1342 if (res->sdev && new_path)
1343 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001344 ipr_format_res_path(res->ioa_cfg,
1345 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001346 } else {
1347 res->flags = cfgtew->u.cfgte->flags;
1348 if (res->flags & IPR_IS_IOA_RESOURCE)
1349 res->type = IPR_RES_TYPE_IOAFP;
1350 else
1351 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1352
1353 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1354 sizeof(struct ipr_std_inq_data));
1355
1356 res->qmodel = IPR_QUEUEING_MODEL(res);
1357 proto = cfgtew->u.cfgte->proto;
1358 res->res_handle = cfgtew->u.cfgte->res_handle;
1359 }
1360
1361 ipr_update_ata_class(res, proto);
1362}
1363
1364/**
1365 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1366 * for the resource.
1367 * @res: resource entry struct
1369 *
1370 * Return value:
1371 * none
1372 **/
1373static void ipr_clear_res_target(struct ipr_resource_entry *res)
1374{
1375 struct ipr_resource_entry *gscsi_res = NULL;
1376 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1377
1378 if (!ioa_cfg->sis64)
1379 return;
1380
1381 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1382 clear_bit(res->target, ioa_cfg->array_ids);
1383 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1384 clear_bit(res->target, ioa_cfg->vset_ids);
1385 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1386 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1387 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1388 return;
1389 clear_bit(res->target, ioa_cfg->target_ids);
1390
1391 } else if (res->bus == 0)
1392 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393}
1394
1395/**
1396 * ipr_handle_config_change - Handle a config change from the adapter
1397 * @ioa_cfg: ioa config struct
1398 * @hostrcb: hostrcb
1399 *
1400 * Return value:
1401 * none
1402 **/
1403static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001404 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405{
1406 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001407 struct ipr_config_table_entry_wrapper cfgtew;
1408 __be32 cc_res_handle;
1409
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 u32 is_ndn = 1;
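	/* Assume this is a new device notification until the resource
	 * handle is found on the used resource queue below. */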
1411
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001412 if (ioa_cfg->sis64) {
1413 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1414 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1415 } else {
1416 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1417 cc_res_handle = cfgtew.u.cfgte->res_handle;
1418 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419
1420 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001421 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 is_ndn = 0;
1423 break;
1424 }
1425 }
1426
1427 if (is_ndn) {
1428 if (list_empty(&ioa_cfg->free_res_q)) {
1429 ipr_send_hcam(ioa_cfg,
1430 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1431 hostrcb);
1432 return;
1433 }
1434
1435 res = list_entry(ioa_cfg->free_res_q.next,
1436 struct ipr_resource_entry, queue);
1437
1438 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001439 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1441 }
1442
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001443 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
1445 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1446 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001448 res->res_handle = IPR_INVALID_RES_HANDLE;
Brian Kingf688f962014-12-02 12:47:37 -06001449 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001450 } else {
1451 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001453 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001454 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 res->add_to_ml = 1;
Brian Kingf688f962014-12-02 12:47:37 -06001456 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 }
1458
1459 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1460}
1461
1462/**
1463 * ipr_process_ccn - Op done function for a CCN.
1464 * @ipr_cmd: ipr command struct
1465 *
1466 * This function is the op done function for a configuration
1467 * change notification host controlled async from the adapter.
1468 *
1469 * Return value:
1470 * none
1471 **/
1472static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1473{
1474 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1475 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001476 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
Brian Kingafc3f832016-08-24 12:56:51 -05001478 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001479 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480
1481 if (ioasc) {
Brian King4fdd7c72015-03-26 11:23:50 -05001482 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1483 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 dev_err(&ioa_cfg->pdev->dev,
1485 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1486
1487 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1488 } else {
1489 ipr_handle_config_change(ioa_cfg, hostrcb);
1490 }
1491}
1492
1493/**
Brian King8cf093e2007-04-26 16:00:14 -05001494 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1495 * @i: index into buffer
1496 * @buf: string to modify
1497 *
1498 * This function will strip all trailing whitespace, pad the end
1499 * of the string with a single space, and NULL terminate the string.
1500 *
1501 * Return value:
1502 * new length of string
1503 **/
1504static int strip_and_pad_whitespace(int i, char *buf)
1505{
1506 while (i && buf[i] == ' ')
1507 i--;
1508 buf[i+1] = ' ';
1509 buf[i+2] = '\0';
1510 return i + 2;
1511}
1512
1513/**
1514 * ipr_log_vpd_compact - Log the passed VPD compactly.
1515 * @prefix: string to print at start of printk
1516 * @hostrcb: hostrcb pointer
1517 * @vpd: vendor/product id/sn struct
1518 *
1519 * Return value:
1520 * none
1521 **/
1522static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1523 struct ipr_vpd *vpd)
1524{
1525 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1526 int i = 0;
1527
1528 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1529 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1530
1531 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1532 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1533
1534 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1535 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1536
1537 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1538}
1539
1540/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001542 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 *
1544 * Return value:
1545 * none
1546 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001547static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548{
1549 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1550 + IPR_SERIAL_NUM_LEN];
1551
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001552 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1553 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 IPR_PROD_ID_LEN);
1555 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1556 ipr_err("Vendor/Product ID: %s\n", buffer);
1557
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001558 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1560 ipr_err(" Serial Number: %s\n", buffer);
1561}
1562
1563/**
Brian King8cf093e2007-04-26 16:00:14 -05001564 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1565 * @prefix: string to print at start of printk
1566 * @hostrcb: hostrcb pointer
1567 * @vpd: vendor/product id/sn/wwn struct
1568 *
1569 * Return value:
1570 * none
1571 **/
1572static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1573 struct ipr_ext_vpd *vpd)
1574{
1575 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1576 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1577 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1578}
1579
1580/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001581 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1582 * @vpd: vendor/product id/sn/wwn struct
1583 *
1584 * Return value:
1585 * none
1586 **/
1587static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1588{
1589 ipr_log_vpd(&vpd->vpd);
1590 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1591 be32_to_cpu(vpd->wwid[1]));
1592}
1593
1594/**
1595 * ipr_log_enhanced_cache_error - Log a cache error.
1596 * @ioa_cfg: ioa config struct
1597 * @hostrcb: hostrcb struct
1598 *
1599 * Return value:
1600 * none
1601 **/
1602static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1603 struct ipr_hostrcb *hostrcb)
1604{
Wayne Boyer4565e372010-02-19 13:24:07 -08001605 struct ipr_hostrcb_type_12_error *error;
1606
1607 if (ioa_cfg->sis64)
1608 error = &hostrcb->hcam.u.error64.u.type_12_error;
1609 else
1610 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001611
1612 ipr_err("-----Current Configuration-----\n");
1613 ipr_err("Cache Directory Card Information:\n");
1614 ipr_log_ext_vpd(&error->ioa_vpd);
1615 ipr_err("Adapter Card Information:\n");
1616 ipr_log_ext_vpd(&error->cfc_vpd);
1617
1618 ipr_err("-----Expected Configuration-----\n");
1619 ipr_err("Cache Directory Card Information:\n");
1620 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1621 ipr_err("Adapter Card Information:\n");
1622 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1623
1624 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1625 be32_to_cpu(error->ioa_data[0]),
1626 be32_to_cpu(error->ioa_data[1]),
1627 be32_to_cpu(error->ioa_data[2]));
1628}
1629
1630/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 * ipr_log_cache_error - Log a cache error.
1632 * @ioa_cfg: ioa config struct
1633 * @hostrcb: hostrcb struct
1634 *
1635 * Return value:
1636 * none
1637 **/
1638static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1639 struct ipr_hostrcb *hostrcb)
1640{
1641 struct ipr_hostrcb_type_02_error *error =
1642 &hostrcb->hcam.u.error.u.type_02_error;
1643
1644 ipr_err("-----Current Configuration-----\n");
1645 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001646 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001648 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
1650 ipr_err("-----Expected Configuration-----\n");
1651 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001652 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001654 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655
1656 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1657 be32_to_cpu(error->ioa_data[0]),
1658 be32_to_cpu(error->ioa_data[1]),
1659 be32_to_cpu(error->ioa_data[2]));
1660}
1661
1662/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001663 * ipr_log_enhanced_config_error - Log a configuration error.
1664 * @ioa_cfg: ioa config struct
1665 * @hostrcb: hostrcb struct
1666 *
1667 * Return value:
1668 * none
1669 **/
1670static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1671 struct ipr_hostrcb *hostrcb)
1672{
1673 int errors_logged, i;
1674 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1675 struct ipr_hostrcb_type_13_error *error;
1676
1677 error = &hostrcb->hcam.u.error.u.type_13_error;
1678 errors_logged = be32_to_cpu(error->errors_logged);
1679
1680 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1681 be32_to_cpu(error->errors_detected), errors_logged);
1682
1683 dev_entry = error->dev;
1684
1685 for (i = 0; i < errors_logged; i++, dev_entry++) {
1686 ipr_err_separator;
1687
1688 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1689 ipr_log_ext_vpd(&dev_entry->vpd);
1690
1691 ipr_err("-----New Device Information-----\n");
1692 ipr_log_ext_vpd(&dev_entry->new_vpd);
1693
1694 ipr_err("Cache Directory Card Information:\n");
1695 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1696
1697 ipr_err("Adapter Card Information:\n");
1698 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1699 }
1700}
1701
1702/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001703 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1704 * @ioa_cfg: ioa config struct
1705 * @hostrcb: hostrcb struct
1706 *
1707 * Return value:
1708 * none
1709 **/
1710static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711 struct ipr_hostrcb *hostrcb)
1712{
1713 int errors_logged, i;
1714 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1715 struct ipr_hostrcb_type_23_error *error;
1716 char buffer[IPR_MAX_RES_PATH_LENGTH];
1717
1718 error = &hostrcb->hcam.u.error64.u.type_23_error;
1719 errors_logged = be32_to_cpu(error->errors_logged);
1720
1721 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722 be32_to_cpu(error->errors_detected), errors_logged);
1723
1724 dev_entry = error->dev;
1725
1726 for (i = 0; i < errors_logged; i++, dev_entry++) {
1727 ipr_err_separator;
1728
1729 ipr_err("Device %d : %s", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001730 __ipr_format_res_path(dev_entry->res_path,
1731 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001732 ipr_log_ext_vpd(&dev_entry->vpd);
1733
1734 ipr_err("-----New Device Information-----\n");
1735 ipr_log_ext_vpd(&dev_entry->new_vpd);
1736
1737 ipr_err("Cache Directory Card Information:\n");
1738 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1739
1740 ipr_err("Adapter Card Information:\n");
1741 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1742 }
1743}
1744
1745/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 * ipr_log_config_error - Log a configuration error.
1747 * @ioa_cfg: ioa config struct
1748 * @hostrcb: hostrcb struct
1749 *
1750 * Return value:
1751 * none
1752 **/
1753static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1754 struct ipr_hostrcb *hostrcb)
1755{
1756 int errors_logged, i;
1757 struct ipr_hostrcb_device_data_entry *dev_entry;
1758 struct ipr_hostrcb_type_03_error *error;
1759
1760 error = &hostrcb->hcam.u.error.u.type_03_error;
1761 errors_logged = be32_to_cpu(error->errors_logged);
1762
1763 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1764 be32_to_cpu(error->errors_detected), errors_logged);
1765
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001766 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
1768 for (i = 0; i < errors_logged; i++, dev_entry++) {
1769 ipr_err_separator;
1770
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001771 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001772 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773
1774 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001775 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
1777 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001778 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
1780 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001781 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782
1783 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1784 be32_to_cpu(dev_entry->ioa_data[0]),
1785 be32_to_cpu(dev_entry->ioa_data[1]),
1786 be32_to_cpu(dev_entry->ioa_data[2]),
1787 be32_to_cpu(dev_entry->ioa_data[3]),
1788 be32_to_cpu(dev_entry->ioa_data[4]));
1789 }
1790}
1791
1792/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001793 * ipr_log_enhanced_array_error - Log an array configuration error.
1794 * @ioa_cfg: ioa config struct
1795 * @hostrcb: hostrcb struct
1796 *
1797 * Return value:
1798 * none
1799 **/
1800static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1801 struct ipr_hostrcb *hostrcb)
1802{
1803 int i, num_entries;
1804 struct ipr_hostrcb_type_14_error *error;
1805 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1806 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1807
1808 error = &hostrcb->hcam.u.error.u.type_14_error;
1809
1810 ipr_err_separator;
1811
1812 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1813 error->protection_level,
1814 ioa_cfg->host->host_no,
1815 error->last_func_vset_res_addr.bus,
1816 error->last_func_vset_res_addr.target,
1817 error->last_func_vset_res_addr.lun);
1818
1819 ipr_err_separator;
1820
1821 array_entry = error->array_member;
1822 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001823 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001824
1825 for (i = 0; i < num_entries; i++, array_entry++) {
1826 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1827 continue;
1828
1829 if (be32_to_cpu(error->exposed_mode_adn) == i)
1830 ipr_err("Exposed Array Member %d:\n", i);
1831 else
1832 ipr_err("Array Member %d:\n", i);
1833
1834 ipr_log_ext_vpd(&array_entry->vpd);
1835 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1836 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1837 "Expected Location");
1838
1839 ipr_err_separator;
1840 }
1841}
1842
1843/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 * ipr_log_array_error - Log an array configuration error.
1845 * @ioa_cfg: ioa config struct
1846 * @hostrcb: hostrcb struct
1847 *
1848 * Return value:
1849 * none
1850 **/
1851static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1852 struct ipr_hostrcb *hostrcb)
1853{
1854 int i;
1855 struct ipr_hostrcb_type_04_error *error;
1856 struct ipr_hostrcb_array_data_entry *array_entry;
1857 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1858
1859 error = &hostrcb->hcam.u.error.u.type_04_error;
1860
1861 ipr_err_separator;
1862
1863 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1864 error->protection_level,
1865 ioa_cfg->host->host_no,
1866 error->last_func_vset_res_addr.bus,
1867 error->last_func_vset_res_addr.target,
1868 error->last_func_vset_res_addr.lun);
1869
1870 ipr_err_separator;
1871
1872 array_entry = error->array_member;
1873
1874 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001875 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 continue;
1877
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001878 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001880 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001883 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001885 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1886 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1887 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
1889 ipr_err_separator;
1890
1891 if (i == 9)
1892 array_entry = error->array_member2;
1893 else
1894 array_entry++;
1895 }
1896}
1897
1898/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001899 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001900 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001901 * @data: IOA error data
1902 * @len: data length
1903 *
1904 * Return value:
1905 * none
1906 **/
Brian King359d96e2015-06-11 20:45:20 -05001907static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001908{
1909 int i;
1910
1911 if (len == 0)
1912 return;
1913
Brian Kingac719ab2006-11-21 10:28:42 -06001914 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1915 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1916
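	/* i indexes 32-bit words; each line dumps four words (16 bytes) and
	 * prints the starting byte offset in the leading column. */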
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001917 for (i = 0; i < len / 4; i += 4) {
1918 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1919 be32_to_cpu(data[i]),
1920 be32_to_cpu(data[i+1]),
1921 be32_to_cpu(data[i+2]),
1922 be32_to_cpu(data[i+3]));
1923 }
1924}
1925
1926/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001927 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1928 * @ioa_cfg: ioa config struct
1929 * @hostrcb: hostrcb struct
1930 *
1931 * Return value:
1932 * none
1933 **/
1934static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1935 struct ipr_hostrcb *hostrcb)
1936{
1937 struct ipr_hostrcb_type_17_error *error;
1938
Wayne Boyer4565e372010-02-19 13:24:07 -08001939 if (ioa_cfg->sis64)
1940 error = &hostrcb->hcam.u.error64.u.type_17_error;
1941 else
1942 error = &hostrcb->hcam.u.error.u.type_17_error;
1943
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001944 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001945 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001946
Brian King8cf093e2007-04-26 16:00:14 -05001947 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1948 be32_to_cpu(hostrcb->hcam.u.error.prc));
1949 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001950 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001951 be32_to_cpu(hostrcb->hcam.length) -
1952 (offsetof(struct ipr_hostrcb_error, u) +
1953 offsetof(struct ipr_hostrcb_type_17_error, data)));
1954}
1955
1956/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001957 * ipr_log_dual_ioa_error - Log a dual adapter error.
1958 * @ioa_cfg: ioa config struct
1959 * @hostrcb: hostrcb struct
1960 *
1961 * Return value:
1962 * none
1963 **/
1964static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1965 struct ipr_hostrcb *hostrcb)
1966{
1967 struct ipr_hostrcb_type_07_error *error;
1968
1969 error = &hostrcb->hcam.u.error.u.type_07_error;
1970 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001971 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001972
Brian King8cf093e2007-04-26 16:00:14 -05001973 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1974 be32_to_cpu(hostrcb->hcam.u.error.prc));
1975 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001976 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001977 be32_to_cpu(hostrcb->hcam.length) -
1978 (offsetof(struct ipr_hostrcb_error, u) +
1979 offsetof(struct ipr_hostrcb_type_07_error, data)));
1980}
1981
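/* Lookup tables used to decode the path_state byte of a fabric path
 * descriptor into readable text for logging. */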
Brian King49dc6a12006-11-21 10:28:35 -06001982static const struct {
1983 u8 active;
1984 char *desc;
1985} path_active_desc[] = {
1986 { IPR_PATH_NO_INFO, "Path" },
1987 { IPR_PATH_ACTIVE, "Active path" },
1988 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1989};
1990
1991static const struct {
1992 u8 state;
1993 char *desc;
1994} path_state_desc[] = {
1995 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1996 { IPR_PATH_HEALTHY, "is healthy" },
1997 { IPR_PATH_DEGRADED, "is degraded" },
1998 { IPR_PATH_FAILED, "is failed" }
1999};
2000
2001/**
2002 * ipr_log_fabric_path - Log a fabric path error
2003 * @hostrcb: hostrcb struct
2004 * @fabric: fabric descriptor
2005 *
2006 * Return value:
2007 * none
2008 **/
2009static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2010 struct ipr_hostrcb_fabric_desc *fabric)
2011{
2012 int i, j;
2013 u8 path_state = fabric->path_state;
2014 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2015 u8 state = path_state & IPR_PATH_STATE_MASK;
2016
2017 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2018 if (path_active_desc[i].active != active)
2019 continue;
2020
2021 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2022 if (path_state_desc[j].state != state)
2023 continue;
2024
2025 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2026 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2027 path_active_desc[i].desc, path_state_desc[j].desc,
2028 fabric->ioa_port);
2029 } else if (fabric->cascaded_expander == 0xff) {
2030 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2031 path_active_desc[i].desc, path_state_desc[j].desc,
2032 fabric->ioa_port, fabric->phy);
2033 } else if (fabric->phy == 0xff) {
2034 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2035 path_active_desc[i].desc, path_state_desc[j].desc,
2036 fabric->ioa_port, fabric->cascaded_expander);
2037 } else {
2038 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2039 path_active_desc[i].desc, path_state_desc[j].desc,
2040 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2041 }
2042 return;
2043 }
2044 }
2045
2046 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2047 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2048}
2049
Wayne Boyer4565e372010-02-19 13:24:07 -08002050/**
2051 * ipr_log64_fabric_path - Log a fabric path error
2052 * @hostrcb: hostrcb struct
2053 * @fabric: fabric descriptor
2054 *
2055 * Return value:
2056 * none
2057 **/
2058static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2059 struct ipr_hostrcb64_fabric_desc *fabric)
2060{
2061 int i, j;
2062 u8 path_state = fabric->path_state;
2063 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2064 u8 state = path_state & IPR_PATH_STATE_MASK;
2065 char buffer[IPR_MAX_RES_PATH_LENGTH];
2066
2067 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2068 if (path_active_desc[i].active != active)
2069 continue;
2070
2071 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2072 if (path_state_desc[j].state != state)
2073 continue;
2074
2075 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2076 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002077 ipr_format_res_path(hostrcb->ioa_cfg,
2078 fabric->res_path,
2079 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002080 return;
2081 }
2082 }
2083
2084 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002085 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2086 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002087}
2088
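/* Lookup tables used to decode the type_status byte of a fabric path
 * element into readable text for logging. */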
Brian King49dc6a12006-11-21 10:28:35 -06002089static const struct {
2090 u8 type;
2091 char *desc;
2092} path_type_desc[] = {
2093 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2094 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2095 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2096 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2097};
2098
2099static const struct {
2100 u8 status;
2101 char *desc;
2102} path_status_desc[] = {
2103 { IPR_PATH_CFG_NO_PROB, "Functional" },
2104 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2105 { IPR_PATH_CFG_FAILED, "Failed" },
2106 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2107 { IPR_PATH_NOT_DETECTED, "Missing" },
2108 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2109};
2110
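/* Negotiated link rate strings, indexed by
 * cfg->link_rate & IPR_PHY_LINK_RATE_MASK (16 possible values). */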
2111static const char *link_rate[] = {
2112 "unknown",
2113 "disabled",
2114 "phy reset problem",
2115 "spinup hold",
2116 "port selector",
2117 "unknown",
2118 "unknown",
2119 "unknown",
2120 "1.5Gbps",
2121 "3.0Gbps",
2122 "unknown",
2123 "unknown",
2124 "unknown",
2125 "unknown",
2126 "unknown",
2127 "unknown"
2128};
2129
2130/**
2131 * ipr_log_path_elem - Log a fabric path element.
2132 * @hostrcb: hostrcb struct
2133 * @cfg: fabric path element struct
2134 *
2135 * Return value:
2136 * none
2137 **/
2138static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2139 struct ipr_hostrcb_config_element *cfg)
2140{
2141 int i, j;
2142 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2143 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2144
2145 if (type == IPR_PATH_CFG_NOT_EXIST)
2146 return;
2147
2148 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2149 if (path_type_desc[i].type != type)
2150 continue;
2151
2152 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2153 if (path_status_desc[j].status != status)
2154 continue;
2155
2156 if (type == IPR_PATH_CFG_IOA_PORT) {
2157 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2158 path_status_desc[j].desc, path_type_desc[i].desc,
2159 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2160 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2161 } else {
2162 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2163 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2164 path_status_desc[j].desc, path_type_desc[i].desc,
2165 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2166 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2167 } else if (cfg->cascaded_expander == 0xff) {
2168 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2169 "WWN=%08X%08X\n", path_status_desc[j].desc,
2170 path_type_desc[i].desc, cfg->phy,
2171 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2172 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2173 } else if (cfg->phy == 0xff) {
2174 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2175 "WWN=%08X%08X\n", path_status_desc[j].desc,
2176 path_type_desc[i].desc, cfg->cascaded_expander,
2177 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2178 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2179 } else {
2180 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2181 "WWN=%08X%08X\n", path_status_desc[j].desc,
2182 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2183 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2184 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2185 }
2186 }
2187 return;
2188 }
2189 }
2190
2191 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2192 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2193 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2194 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2195}
2196
2197/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002198 * ipr_log64_path_elem - Log a fabric path element.
2199 * @hostrcb: hostrcb struct
2200 * @cfg: fabric path element struct
2201 *
2202 * Return value:
2203 * none
2204 **/
2205static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2206 struct ipr_hostrcb64_config_element *cfg)
2207{
2208 int i, j;
2209 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2210 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2211 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2212 char buffer[IPR_MAX_RES_PATH_LENGTH];
2213
2214 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2215 return;
2216
2217 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2218 if (path_type_desc[i].type != type)
2219 continue;
2220
2221 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2222 if (path_status_desc[j].status != status)
2223 continue;
2224
2225 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2226 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002227 ipr_format_res_path(hostrcb->ioa_cfg,
2228 cfg->res_path, buffer, sizeof(buffer)),
2229 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2230 be32_to_cpu(cfg->wwid[0]),
2231 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002232 return;
2233 }
2234 }
2235 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2236 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002237 ipr_format_res_path(hostrcb->ioa_cfg,
2238 cfg->res_path, buffer, sizeof(buffer)),
2239 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2240 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002241}
2242
2243/**
Brian King49dc6a12006-11-21 10:28:35 -06002244 * ipr_log_fabric_error - Log a fabric error.
2245 * @ioa_cfg: ioa config struct
2246 * @hostrcb: hostrcb struct
2247 *
2248 * Return value:
2249 * none
2250 **/
2251static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2252 struct ipr_hostrcb *hostrcb)
2253{
2254 struct ipr_hostrcb_type_20_error *error;
2255 struct ipr_hostrcb_fabric_desc *fabric;
2256 struct ipr_hostrcb_config_element *cfg;
2257 int i, add_len;
2258
2259 error = &hostrcb->hcam.u.error.u.type_20_error;
2260 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2261 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2262
2263 add_len = be32_to_cpu(hostrcb->hcam.length) -
2264 (offsetof(struct ipr_hostrcb_error, u) +
2265 offsetof(struct ipr_hostrcb_type_20_error, desc));
2266
2267 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2268 ipr_log_fabric_path(hostrcb, fabric);
2269 for_each_fabric_cfg(fabric, cfg)
2270 ipr_log_path_elem(hostrcb, cfg);
2271
2272 add_len -= be16_to_cpu(fabric->length);
2273 fabric = (struct ipr_hostrcb_fabric_desc *)
2274 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2275 }
2276
Brian King359d96e2015-06-11 20:45:20 -05002277 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002278}
2279
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002280/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002281 * ipr_log_sis64_array_error - Log a sis64 array error.
2282 * @ioa_cfg: ioa config struct
2283 * @hostrcb: hostrcb struct
2284 *
2285 * Return value:
2286 * none
2287 **/
2288static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2289 struct ipr_hostrcb *hostrcb)
2290{
2291 int i, num_entries;
2292 struct ipr_hostrcb_type_24_error *error;
2293 struct ipr_hostrcb64_array_data_entry *array_entry;
2294 char buffer[IPR_MAX_RES_PATH_LENGTH];
2295 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2296
2297 error = &hostrcb->hcam.u.error64.u.type_24_error;
2298
2299 ipr_err_separator;
2300
2301 ipr_err("RAID %s Array Configuration: %s\n",
2302 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002303 ipr_format_res_path(ioa_cfg, error->last_res_path,
2304 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002305
2306 ipr_err_separator;
2307
2308 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002309 num_entries = min_t(u32, error->num_entries,
2310 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002311
2312 for (i = 0; i < num_entries; i++, array_entry++) {
2313
2314 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2315 continue;
2316
2317 if (error->exposed_mode_adn == i)
2318 ipr_err("Exposed Array Member %d:\n", i);
2319 else
2320 ipr_err("Array Member %d:\n", i);
2321
2323 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002324 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002325 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2326 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002327 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002328 ipr_format_res_path(ioa_cfg,
2329 array_entry->expected_res_path,
2330 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002331
2332 ipr_err_separator;
2333 }
2334}
2335
2336/**
2337 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2338 * @ioa_cfg: ioa config struct
2339 * @hostrcb: hostrcb struct
2340 *
2341 * Return value:
2342 * none
2343 **/
2344static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2345 struct ipr_hostrcb *hostrcb)
2346{
2347 struct ipr_hostrcb_type_30_error *error;
2348 struct ipr_hostrcb64_fabric_desc *fabric;
2349 struct ipr_hostrcb64_config_element *cfg;
2350 int i, add_len;
2351
2352 error = &hostrcb->hcam.u.error64.u.type_30_error;
2353
2354 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2355 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2356
2357 add_len = be32_to_cpu(hostrcb->hcam.length) -
2358 (offsetof(struct ipr_hostrcb64_error, u) +
2359 offsetof(struct ipr_hostrcb_type_30_error, desc));
2360
2361 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2362 ipr_log64_fabric_path(hostrcb, fabric);
2363 for_each_fabric_cfg(fabric, cfg)
2364 ipr_log64_path_elem(hostrcb, cfg);
2365
2366 add_len -= be16_to_cpu(fabric->length);
2367 fabric = (struct ipr_hostrcb64_fabric_desc *)
2368 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2369 }
2370
Brian King359d96e2015-06-11 20:45:20 -05002371 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Wayne Boyer4565e372010-02-19 13:24:07 -08002372}
2373
2374/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 * ipr_log_generic_error - Log an adapter error.
2376 * @ioa_cfg: ioa config struct
2377 * @hostrcb: hostrcb struct
2378 *
2379 * Return value:
2380 * none
2381 **/
2382static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2383 struct ipr_hostrcb *hostrcb)
2384{
Brian Kingac719ab2006-11-21 10:28:42 -06002385 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002386 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387}
2388
2389/**
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002390 * ipr_log_sis64_device_error - Log a sis64 device error.
2391 * @ioa_cfg: ioa config struct
2392 * @hostrcb: hostrcb struct
2393 *
2394 * Return value:
2395 * none
2396 **/
2397static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2398 struct ipr_hostrcb *hostrcb)
2399{
2400 struct ipr_hostrcb_type_21_error *error;
2401 char buffer[IPR_MAX_RES_PATH_LENGTH];
2402
2403 error = &hostrcb->hcam.u.error64.u.type_21_error;
2404
2405 ipr_err("-----Failing Device Information-----\n");
2406 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2407 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2408 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2409 ipr_err("Device Resource Path: %s\n",
2410 __ipr_format_res_path(error->res_path,
2411 buffer, sizeof(buffer)));
2412 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2413 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2414 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2415 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2416 ipr_err("SCSI Sense Data:\n");
2417 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2418	ipr_err("SCSI Command Descriptor Block:\n");
2419 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2420
2421 ipr_err("Additional IOA Data:\n");
2422 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2423}
2424
2425/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2427 * @ioasc: IOASC
2428 *
2429 * This function will return the index into the ipr_error_table
2430 * for the specified IOASC. If the IOASC is not in the table,
2431 * 0 will be returned, which points to the entry used for unknown errors.
2432 *
2433 * Return value:
2434 * index into the ipr_error_table
2435 **/
2436static u32 ipr_get_error(u32 ioasc)
2437{
2438 int i;
2439
2440 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002441 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 return i;
2443
2444 return 0;
2445}
2446
2447/**
2448 * ipr_handle_log_data - Log an adapter error.
2449 * @ioa_cfg: ioa config struct
2450 * @hostrcb: hostrcb struct
2451 *
2452 * This function logs an adapter error to the system.
2453 *
2454 * Return value:
2455 * none
2456 **/
2457static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2458 struct ipr_hostrcb *hostrcb)
2459{
2460 u32 ioasc;
2461 int error_index;
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002462 struct ipr_hostrcb_type_21_error *error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463
2464 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2465 return;
2466
2467 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2468 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2469
Wayne Boyer4565e372010-02-19 13:24:07 -08002470 if (ioa_cfg->sis64)
2471 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2472 else
2473 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474
Wayne Boyer4565e372010-02-19 13:24:07 -08002475 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2476 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2478 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002479 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 }
2481
2482 error_index = ipr_get_error(ioasc);
2483
2484 if (!ipr_error_table[error_index].log_hcam)
2485 return;
2486
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002487 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2488 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2489 error = &hostrcb->hcam.u.error64.u.type_21_error;
2490
2491 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2492 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2493 return;
2494 }
2495
Brian King49dc6a12006-11-21 10:28:35 -06002496 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497
2498 /* Set indication we have logged an error */
2499 ioa_cfg->errors_logged++;
2500
Brian King933916f2007-03-29 12:43:30 -05002501 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002503 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2504 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505
2506 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 case IPR_HOST_RCB_OVERLAY_ID_2:
2508 ipr_log_cache_error(ioa_cfg, hostrcb);
2509 break;
2510 case IPR_HOST_RCB_OVERLAY_ID_3:
2511 ipr_log_config_error(ioa_cfg, hostrcb);
2512 break;
2513 case IPR_HOST_RCB_OVERLAY_ID_4:
2514 case IPR_HOST_RCB_OVERLAY_ID_6:
2515 ipr_log_array_error(ioa_cfg, hostrcb);
2516 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002517 case IPR_HOST_RCB_OVERLAY_ID_7:
2518 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2519 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002520 case IPR_HOST_RCB_OVERLAY_ID_12:
2521 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2522 break;
2523 case IPR_HOST_RCB_OVERLAY_ID_13:
2524 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2525 break;
2526 case IPR_HOST_RCB_OVERLAY_ID_14:
2527 case IPR_HOST_RCB_OVERLAY_ID_16:
2528 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2529 break;
2530 case IPR_HOST_RCB_OVERLAY_ID_17:
2531 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2532 break;
Brian King49dc6a12006-11-21 10:28:35 -06002533 case IPR_HOST_RCB_OVERLAY_ID_20:
2534 ipr_log_fabric_error(ioa_cfg, hostrcb);
2535 break;
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002536 case IPR_HOST_RCB_OVERLAY_ID_21:
2537 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2538 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002539 case IPR_HOST_RCB_OVERLAY_ID_23:
2540 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2541 break;
2542 case IPR_HOST_RCB_OVERLAY_ID_24:
2543 case IPR_HOST_RCB_OVERLAY_ID_26:
2544 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2545 break;
2546 case IPR_HOST_RCB_OVERLAY_ID_30:
2547 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2548 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002549 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002552 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 break;
2554 }
2555}
2556
Brian Kingafc3f832016-08-24 12:56:51 -05002557static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2558{
2559 struct ipr_hostrcb *hostrcb;
2560
2561 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2562 struct ipr_hostrcb, queue);
2563
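	/*
	 * Fallback path: if the free list has been exhausted, reclaim the
	 * oldest hostrcb still parked on the report queue. This assumes the
	 * report queue is never empty at the same time, since the result is
	 * dereferenced unconditionally below.
	 */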
2564 if (unlikely(!hostrcb)) {
2565 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2566 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2567 struct ipr_hostrcb, queue);
2568 }
2569
2570 list_del_init(&hostrcb->queue);
2571 return hostrcb;
2572}
2573
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574/**
2575 * ipr_process_error - Op done function for an adapter error log.
2576 * @ipr_cmd: ipr command struct
2577 *
2578 * This function is the op done function for an error log host
2579 * controlled async from the adapter. It will log the error and
2580 * send the HCAM back to the adapter.
2581 *
2582 * Return value:
2583 * none
2584 **/
2585static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2586{
2587 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2588 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002589 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002590 u32 fd_ioasc;
2591
2592 if (ioa_cfg->sis64)
2593 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2594 else
2595 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596
Brian Kingafc3f832016-08-24 12:56:51 -05002597 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002598 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599
2600 if (!ioasc) {
2601 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002602 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2603 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Brian King4fdd7c72015-03-26 11:23:50 -05002604 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2605 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 dev_err(&ioa_cfg->pdev->dev,
2607 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2608 }
2609
Brian Kingafc3f832016-08-24 12:56:51 -05002610 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
Brian King8a4236a2016-10-13 14:45:24 -05002611 schedule_work(&ioa_cfg->work_q);
Brian Kingafc3f832016-08-24 12:56:51 -05002612 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
Brian Kingafc3f832016-08-24 12:56:51 -05002613
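	/*
	 * Park the just-processed hostrcb on the report queue and kick the
	 * worker thread, then re-arm the HCAM with a (possibly reclaimed)
	 * free hostrcb so the adapter can keep posting async errors.
	 */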
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2615}
2616
2617/**
2618 * ipr_timeout - An internally generated op has timed out.
2619 * @ipr_cmd: ipr command struct
2620 *
2621 * This function blocks host requests and initiates an
2622 * adapter reset.
2623 *
2624 * Return value:
2625 * none
2626 **/
2627static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2628{
2629 unsigned long lock_flags = 0;
2630 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2631
2632 ENTER;
2633 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2634
2635 ioa_cfg->errors_logged++;
2636 dev_err(&ioa_cfg->pdev->dev,
2637 "Adapter being reset due to command timeout.\n");
2638
2639 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2640 ioa_cfg->sdt_state = GET_DUMP;
2641
2642 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2643 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2644
2645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2646 LEAVE;
2647}
2648
2649/**
2650 * ipr_oper_timeout - Adapter timed out transitioning to operational
2651 * @ipr_cmd: ipr command struct
2652 *
2653 * This function blocks host requests and initiates an
2654 * adapter reset.
2655 *
2656 * Return value:
2657 * none
2658 **/
2659static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2660{
2661 unsigned long lock_flags = 0;
2662 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2663
2664 ENTER;
2665 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2666
2667 ioa_cfg->errors_logged++;
2668 dev_err(&ioa_cfg->pdev->dev,
2669 "Adapter timed out transitioning to operational.\n");
2670
2671 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2672 ioa_cfg->sdt_state = GET_DUMP;
2673
2674 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2675 if (ipr_fastfail)
2676 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2677 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2678 }
2679
2680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2681 LEAVE;
2682}
2683
2684/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 * ipr_find_ses_entry - Find matching SES in SES table
2686 * @res: resource entry struct of SES
2687 *
2688 * Return value:
2689 * pointer to SES table entry / NULL on failure
2690 **/
2691static const struct ipr_ses_table_entry *
2692ipr_find_ses_entry(struct ipr_resource_entry *res)
2693{
2694 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002695 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2697
2698 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2699 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2700 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002701 vpids = &res->std_inq_data.vpids;
2702 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703 matches++;
2704 else
2705 break;
2706 } else
2707 matches++;
2708 }
2709
2710 if (matches == IPR_PROD_ID_LEN)
2711 return ste;
2712 }
2713
2714 return NULL;
2715}
2716
2717/**
2718 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2719 * @ioa_cfg: ioa config struct
2720 * @bus: SCSI bus
2721 * @bus_width: bus width
2722 *
2723 * Return value:
2724 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2725 * For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec is
2726 * twice the bus speed in MHz (e.g. for a wide enabled bus,
2727 * max 160MHz = max 320MB/sec).
2728 **/
2729static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2730{
2731 struct ipr_resource_entry *res;
2732 const struct ipr_ses_table_entry *ste;
2733 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2734
2735 /* Loop through each config table entry in the config table buffer */
2736 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002737 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 continue;
2739
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002740 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 continue;
2742
2743 if (!(ste = ipr_find_ses_entry(res)))
2744 continue;
2745
2746 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
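		/*
		 * Illustrative arithmetic (table value assumed): a limit of 160
		 * on a 16-bit wide bus gives (160 * 10) / (16 / 8) = 800, i.e.
		 * 80 MHz in the 100 kHz units described above.
		 */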
2747 }
2748
2749 return max_xfer_rate;
2750}
2751
2752/**
2753 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2754 * @ioa_cfg: ioa config struct
2755 * @max_delay: max delay in micro-seconds to wait
2756 *
2757 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2758 *
2759 * Return value:
2760 * 0 on success / other on failure
2761 **/
2762static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2763{
2764 volatile u32 pcii_reg;
2765 int delay = 1;
2766
2767 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2768 while (delay < max_delay) {
2769 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2770
2771 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2772 return 0;
2773
2774 /* udelay cannot be used if delay is more than a few milliseconds */
2775 if ((delay / 1000) > MAX_UDELAY_MS)
2776 mdelay(delay / 1000);
2777 else
2778 udelay(delay);
2779
2780 delay += delay;
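		/* The delay doubles each pass, so the total busy-wait stays
		 * within roughly twice max_delay. */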
2781 }
2782 return -EIO;
2783}
2784
2785/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002786 * ipr_get_sis64_dump_data_section - Dump IOA memory
2787 * @ioa_cfg: ioa config struct
2788 * @start_addr: adapter address to dump
2789 * @dest: destination kernel buffer
2790 * @length_in_words: length to dump in 4 byte words
2791 *
2792 * Return value:
2793 * 0 on success
2794 **/
2795static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2796 u32 start_addr,
2797 __be32 *dest, u32 length_in_words)
2798{
2799 int i;
2800
2801 for (i = 0; i < length_in_words; i++) {
2802 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2803 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2804 dest++;
2805 }
2806
2807 return 0;
2808}
2809
2810/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 * ipr_get_ldump_data_section - Dump IOA memory
2812 * @ioa_cfg: ioa config struct
2813 * @start_addr: adapter address to dump
2814 * @dest: destination kernel buffer
2815 * @length_in_words: length to dump in 4 byte words
2816 *
2817 * Return value:
2818 * 0 on success / -EIO on failure
2819 **/
2820static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2821 u32 start_addr,
2822 __be32 *dest, u32 length_in_words)
2823{
2824 volatile u32 temp_pcii_reg;
2825 int i, delay = 0;
2826
Wayne Boyerdcbad002010-02-19 13:24:14 -08002827 if (ioa_cfg->sis64)
2828 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2829 dest, length_in_words);
2830
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 /* Write IOA interrupt reg starting LDUMP state */
2832 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002833 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834
2835 /* Wait for IO debug acknowledge */
2836 if (ipr_wait_iodbg_ack(ioa_cfg,
2837 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2838 dev_err(&ioa_cfg->pdev->dev,
2839 "IOA dump long data transfer timeout\n");
2840 return -EIO;
2841 }
2842
2843 /* Signal LDUMP interlocked - clear IO debug ack */
2844 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2845 ioa_cfg->regs.clr_interrupt_reg);
2846
2847 /* Write Mailbox with starting address */
2848 writel(start_addr, ioa_cfg->ioa_mailbox);
2849
2850 /* Signal address valid - clear IOA Reset alert */
2851 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002852 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853
2854 for (i = 0; i < length_in_words; i++) {
2855 /* Wait for IO debug acknowledge */
2856 if (ipr_wait_iodbg_ack(ioa_cfg,
2857 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2858 dev_err(&ioa_cfg->pdev->dev,
2859 "IOA dump short data transfer timeout\n");
2860 return -EIO;
2861 }
2862
2863 /* Read data from mailbox and increment destination pointer */
2864 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2865 dest++;
2866
2867 /* For all but the last word of data, signal data received */
2868 if (i < (length_in_words - 1)) {
2869 /* Signal dump data received - Clear IO debug Ack */
2870 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2871 ioa_cfg->regs.clr_interrupt_reg);
2872 }
2873 }
2874
2875 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2876 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002877 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878
2879 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002880 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881
2882 /* Signal dump data received - Clear IO debug Ack */
2883 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2884 ioa_cfg->regs.clr_interrupt_reg);
2885
2886 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2887 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2888 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002889 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890
2891 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2892 return 0;
2893
2894 udelay(10);
2895 delay += 10;
2896 }
2897
2898 return 0;
2899}
2900
2901#ifdef CONFIG_SCSI_IPR_DUMP
2902/**
2903 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2904 * @ioa_cfg: ioa config struct
2905 * @pci_address: adapter address
2906 * @length: length of data to copy
2907 *
2908 * Copy data from PCI adapter to kernel buffer.
2909 * Note: length MUST be a 4 byte multiple
2910 * Return value:
2911 * 0 on success / other on failure
2912 **/
2913static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2914 unsigned long pci_address, u32 length)
2915{
2916 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002917 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 __be32 *page;
2919 unsigned long lock_flags = 0;
2920 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2921
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002922 if (ioa_cfg->sis64)
2923 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2924 else
2925 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2926
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002928 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 if (ioa_dump->page_offset >= PAGE_SIZE ||
2930 ioa_dump->page_offset == 0) {
2931 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2932
2933 if (!page) {
2934 ipr_trace;
2935 return bytes_copied;
2936 }
2937
2938 ioa_dump->page_offset = 0;
2939 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2940 ioa_dump->next_page_index++;
2941 } else
2942 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2943
2944 rem_len = length - bytes_copied;
2945 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2946 cur_len = min(rem_len, rem_page_len);
2947
2948 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2949 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2950 rc = -EIO;
2951 } else {
2952 rc = ipr_get_ldump_data_section(ioa_cfg,
2953 pci_address + bytes_copied,
2954 &page[ioa_dump->page_offset / 4],
2955 (cur_len / sizeof(u32)));
2956 }
2957 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2958
2959 if (!rc) {
2960 ioa_dump->page_offset += cur_len;
2961 bytes_copied += cur_len;
2962 } else {
2963 ipr_trace;
2964 break;
2965 }
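		/* Yield between chunks; this copy runs from the worker thread
		 * (task context) and the host lock has been released above, so
		 * giving up the CPU here is safe. */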
2966 schedule();
2967 }
2968
2969 return bytes_copied;
2970}
2971
2972/**
2973 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2974 * @hdr: dump entry header struct
2975 *
2976 * Return value:
2977 * nothing
2978 **/
2979static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2980{
2981 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2982 hdr->num_elems = 1;
2983 hdr->offset = sizeof(*hdr);
2984 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2985}
2986
2987/**
2988 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2989 * @ioa_cfg: ioa config struct
2990 * @driver_dump: driver dump struct
2991 *
2992 * Return value:
2993 * nothing
2994 **/
2995static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2996 struct ipr_driver_dump *driver_dump)
2997{
2998 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2999
3000 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3001 driver_dump->ioa_type_entry.hdr.len =
3002 sizeof(struct ipr_dump_ioa_type_entry) -
3003 sizeof(struct ipr_dump_entry_header);
3004 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3005 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3006 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3007 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3008 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3009 ucode_vpd->minor_release[1];
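	/* e.g. (illustrative values) major 0x02, card type 0x01 and minor
	 * 0x03/0x04 pack to fw_version 0x02010304 */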
3010 driver_dump->hdr.num_entries++;
3011}
3012
3013/**
3014 * ipr_dump_version_data - Fill in the driver version in the dump.
3015 * @ioa_cfg: ioa config struct
3016 * @driver_dump: driver dump struct
3017 *
3018 * Return value:
3019 * nothing
3020 **/
3021static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3022 struct ipr_driver_dump *driver_dump)
3023{
3024 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3025 driver_dump->version_entry.hdr.len =
3026 sizeof(struct ipr_dump_version_entry) -
3027 sizeof(struct ipr_dump_entry_header);
3028 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3029 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3030 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3031 driver_dump->hdr.num_entries++;
3032}
3033
3034/**
3035 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3036 * @ioa_cfg: ioa config struct
3037 * @driver_dump: driver dump struct
3038 *
3039 * Return value:
3040 * nothing
3041 **/
3042static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3043 struct ipr_driver_dump *driver_dump)
3044{
3045 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3046 driver_dump->trace_entry.hdr.len =
3047 sizeof(struct ipr_dump_trace_entry) -
3048 sizeof(struct ipr_dump_entry_header);
3049 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3050 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3051 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3052 driver_dump->hdr.num_entries++;
3053}
3054
3055/**
3056 * ipr_dump_location_data - Fill in the IOA location in the dump.
3057 * @ioa_cfg: ioa config struct
3058 * @driver_dump: driver dump struct
3059 *
3060 * Return value:
3061 * nothing
3062 **/
3063static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3064 struct ipr_driver_dump *driver_dump)
3065{
3066 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3067 driver_dump->location_entry.hdr.len =
3068 sizeof(struct ipr_dump_location_entry) -
3069 sizeof(struct ipr_dump_entry_header);
3070 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3071 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01003072 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073 driver_dump->hdr.num_entries++;
3074}
3075
3076/**
3077 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3078 * @ioa_cfg: ioa config struct
3079 * @dump: dump struct
3080 *
3081 * Return value:
3082 * nothing
3083 **/
3084static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3085{
3086 unsigned long start_addr, sdt_word;
3087 unsigned long lock_flags = 0;
3088 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3089 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003090 u32 num_entries, max_num_entries, start_off, end_off;
3091 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08003093 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094 int i;
3095
3096 ENTER;
3097
3098 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3099
Brian King41e9a692011-09-21 08:51:11 -05003100 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3102 return;
3103 }
3104
Wayne Boyer110def82010-11-04 09:36:16 -07003105 if (ioa_cfg->sis64) {
3106 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3107 ssleep(IPR_DUMP_DELAY_SECONDS);
3108 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3109 }
3110
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 start_addr = readl(ioa_cfg->ioa_mailbox);
3112
Wayne Boyerdcbad002010-02-19 13:24:14 -08003113 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114 dev_err(&ioa_cfg->pdev->dev,
3115 "Invalid dump table format: %lx\n", start_addr);
3116 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3117 return;
3118 }
3119
3120 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3121
3122 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3123
3124 /* Initialize the overall dump header */
3125 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3126 driver_dump->hdr.num_entries = 1;
3127 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3128 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3129 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3130 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3131
3132 ipr_dump_version_data(ioa_cfg, driver_dump);
3133 ipr_dump_location_data(ioa_cfg, driver_dump);
3134 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3135 ipr_dump_trace_data(ioa_cfg, driver_dump);
3136
3137 /* Update dump_header */
3138 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3139
3140 /* IOA Dump entry */
3141 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 ioa_dump->hdr.len = 0;
3143 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3144 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3145
3146 /* First entries in sdt are actually a list of dump addresses and
3147 lengths to gather the real dump data. sdt represents the pointer
3148 to the ioa generated dump table. Dump data will be extracted based
3149 on entries in this table */
3150 sdt = &ioa_dump->sdt;
3151
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003152 if (ioa_cfg->sis64) {
3153 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3154 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3155 } else {
3156 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3157 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3158 }
3159
3160 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3161 (max_num_entries * sizeof(struct ipr_sdt_entry));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003163 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164
3165 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003166 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3167 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 dev_err(&ioa_cfg->pdev->dev,
3169 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3170 rc, be32_to_cpu(sdt->hdr.state));
3171 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3172 ioa_cfg->sdt_state = DUMP_OBTAINED;
3173 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3174 return;
3175 }
3176
3177 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3178
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003179 if (num_entries > max_num_entries)
3180 num_entries = max_num_entries;
3181
3182 /* Update dump length to the actual data to be copied */
3183 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3184 if (ioa_cfg->sis64)
3185 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3186 else
3187 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188
3189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3190
3191 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003192 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3194 break;
3195 }
3196
3197 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003198 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3199 if (ioa_cfg->sis64)
3200 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3201 else {
3202 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3203 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204
Wayne Boyerdcbad002010-02-19 13:24:14 -08003205 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3206 bytes_to_copy = end_off - start_off;
3207 else
3208 valid = 0;
3209 }
3210 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003211 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3213 continue;
3214 }
3215
3216 /* Copy data from adapter to driver buffers */
3217 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3218 bytes_to_copy);
3219
3220 ioa_dump->hdr.len += bytes_copied;
3221
3222 if (bytes_copied != bytes_to_copy) {
3223 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3224 break;
3225 }
3226 }
3227 }
3228 }
3229
3230 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3231
3232 /* Update dump_header */
3233 driver_dump->hdr.len += ioa_dump->hdr.len;
3234 wmb();
3235 ioa_cfg->sdt_state = DUMP_OBTAINED;
3236 LEAVE;
3237}
3238
3239#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003240#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241#endif
3242
3243/**
3244 * ipr_release_dump - Free adapter dump memory
3245 * @kref: kref struct
3246 *
3247 * Return value:
3248 * nothing
3249 **/
3250static void ipr_release_dump(struct kref *kref)
3251{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003252 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3254 unsigned long lock_flags = 0;
3255 int i;
3256
3257 ENTER;
3258 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3259 ioa_cfg->dump = NULL;
3260 ioa_cfg->sdt_state = INACTIVE;
3261 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3262
3263 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3264 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3265
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003266 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 kfree(dump);
3268 LEAVE;
3269}
3270
3271/**
3272 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003273 * @work: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 *
3275 * Called at task level from a work thread. This function takes care
3276 * of adding and removing device from the mid-layer as configuration
3277 * changes are detected by the adapter.
3278 *
3279 * Return value:
3280 * nothing
3281 **/
David Howellsc4028952006-11-22 14:57:56 +00003282static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283{
3284 unsigned long lock_flags;
3285 struct ipr_resource_entry *res;
3286 struct scsi_device *sdev;
3287 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003288 struct ipr_ioa_cfg *ioa_cfg =
3289 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290 u8 bus, target, lun;
3291 int did_work;
3292
3293 ENTER;
3294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295
Brian King41e9a692011-09-21 08:51:11 -05003296 if (ioa_cfg->sdt_state == READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297 dump = ioa_cfg->dump;
3298 if (!dump) {
3299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3300 return;
3301 }
3302 kref_get(&dump->kref);
3303 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3304 ipr_get_ioa_dump(ioa_cfg, dump);
3305 kref_put(&dump->kref, ipr_release_dump);
3306
3307 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King4c647e92011-10-15 09:08:56 -05003308 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3310 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3311 return;
3312 }
3313
Brian Kingb195d5e2016-07-15 14:48:03 -05003314 if (!ioa_cfg->scan_enabled) {
3315 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3316 return;
3317 }
3318
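	/*
	 * The host lock must be dropped around scsi_remove_device() and
	 * scsi_add_device(), so the resource list can change while they run;
	 * both loops below therefore restart their walk after each midlayer
	 * call.
	 */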
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319restart:
3320 do {
3321 did_work = 0;
Brian Kingf688f962014-12-02 12:47:37 -06003322 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3324 return;
3325 }
3326
3327 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3328 if (res->del_from_ml && res->sdev) {
3329 did_work = 1;
3330 sdev = res->sdev;
3331 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003332 if (!res->add_to_ml)
3333 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3334 else
3335 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3337 scsi_remove_device(sdev);
3338 scsi_device_put(sdev);
3339 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3340 }
3341 break;
3342 }
3343 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003344 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345
3346 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3347 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003348 bus = res->bus;
3349 target = res->target;
3350 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003351 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3353 scsi_add_device(ioa_cfg->host, bus, target, lun);
3354 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3355 goto restart;
3356 }
3357 }
3358
Brian Kingf688f962014-12-02 12:47:37 -06003359 ioa_cfg->scan_done = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003361 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 LEAVE;
3363}
3364
3365#ifdef CONFIG_SCSI_IPR_TRACE
3366/**
3367 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003368 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003370 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371 * @buf: buffer
3372 * @off: offset
3373 * @count: buffer size
3374 *
3375 * Return value:
3376 * number of bytes printed to buffer
3377 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003378static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003379 struct bin_attribute *bin_attr,
3380 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381{
Tony Jonesee959b02008-02-22 00:13:36 +01003382 struct device *dev = container_of(kobj, struct device, kobj);
3383 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3385 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003386 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387
3388 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003389 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3390 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003392
3393 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394}
3395
3396static struct bin_attribute ipr_trace_attr = {
3397 .attr = {
3398 .name = "trace",
3399 .mode = S_IRUGO,
3400 },
3401 .size = 0,
3402 .read = ipr_read_trace,
3403};
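/*
 * Illustrative usage (sysfs path assumed, not taken from this file):
 *   cat /sys/class/scsi_host/host0/trace > ipr_trace.bin
 * dumps the raw adapter trace buffer exposed by the attribute above.
 */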
3404#endif
3405
3406/**
3407 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003408 * @dev: class device struct
3409 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 *
3411 * Return value:
3412 * number of bytes printed to buffer
3413 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003414static ssize_t ipr_show_fw_version(struct device *dev,
3415 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416{
Tony Jonesee959b02008-02-22 00:13:36 +01003417 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3419 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3420 unsigned long lock_flags = 0;
3421 int len;
3422
3423 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3424 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3425 ucode_vpd->major_release, ucode_vpd->card_type,
3426 ucode_vpd->minor_release[0],
3427 ucode_vpd->minor_release[1]);
3428 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3429 return len;
3430}
3431
Tony Jonesee959b02008-02-22 00:13:36 +01003432static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433 .attr = {
3434 .name = "fw_version",
3435 .mode = S_IRUGO,
3436 },
3437 .show = ipr_show_fw_version,
3438};
3439
3440/**
3441 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003442 * @dev: class device struct
3443 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444 *
3445 * Return value:
3446 * number of bytes printed to buffer
3447 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003448static ssize_t ipr_show_log_level(struct device *dev,
3449 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450{
Tony Jonesee959b02008-02-22 00:13:36 +01003451 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3453 unsigned long lock_flags = 0;
3454 int len;
3455
3456 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3457 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3458 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3459 return len;
3460}
3461
3462/**
3463 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003464 * @dev: class device struct
3465 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466 *
3467 * Return value:
3468 * number of bytes consumed from the buffer
3469 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003470static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003471 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472 const char *buf, size_t count)
3473{
Tony Jonesee959b02008-02-22 00:13:36 +01003474 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3476 unsigned long lock_flags = 0;
3477
3478 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3479 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3480 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3481 return strlen(buf);
3482}
3483
Tony Jonesee959b02008-02-22 00:13:36 +01003484static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 .attr = {
3486 .name = "log_level",
3487 .mode = S_IRUGO | S_IWUSR,
3488 },
3489 .show = ipr_show_log_level,
3490 .store = ipr_store_log_level
3491};
3492
3493/**
3494 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003495 * @dev: device struct
3496 * @buf: buffer
3497 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 *
3499 * This function will reset the adapter and wait a reasonable
3500 * amount of time for any errors that the adapter might log.
3501 *
3502 * Return value:
3503 * count on success / other on failure
3504 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003505static ssize_t ipr_store_diagnostics(struct device *dev,
3506 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 const char *buf, size_t count)
3508{
Tony Jonesee959b02008-02-22 00:13:36 +01003509 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3511 unsigned long lock_flags = 0;
3512 int rc = count;
3513
3514 if (!capable(CAP_SYS_ADMIN))
3515 return -EACCES;
3516
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003518 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3520 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3521 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3522 }
3523
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 ioa_cfg->errors_logged = 0;
3525 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3526
3527 if (ioa_cfg->in_reset_reload) {
3528 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3529 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3530
3531 /* Wait for a second for any errors to be logged */
3532 msleep(1000);
3533 } else {
3534 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3535 return -EIO;
3536 }
3537
3538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3539 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3540 rc = -EIO;
3541 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3542
3543 return rc;
3544}
3545
Tony Jonesee959b02008-02-22 00:13:36 +01003546static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547 .attr = {
3548 .name = "run_diagnostics",
3549 .mode = S_IWUSR,
3550 },
3551 .store = ipr_store_diagnostics
3552};
3553
3554/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003555 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003556 * @dev: device struct
3557 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003558 *
3559 * Return value:
3560 * number of bytes printed to buffer
3561 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003562static ssize_t ipr_show_adapter_state(struct device *dev,
3563 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003564{
Tony Jonesee959b02008-02-22 00:13:36 +01003565 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003566 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3567 unsigned long lock_flags = 0;
3568 int len;
3569
3570 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003571 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003572 len = snprintf(buf, PAGE_SIZE, "offline\n");
3573 else
3574 len = snprintf(buf, PAGE_SIZE, "online\n");
3575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3576 return len;
3577}
3578
3579/**
3580 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003581 * @dev: device struct
3582 * @buf: buffer
3583 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003584 *
3585 * This function will change the adapter's state.
3586 *
3587 * Return value:
3588 * count on success / other on failure
3589 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003590static ssize_t ipr_store_adapter_state(struct device *dev,
3591 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003592 const char *buf, size_t count)
3593{
Tony Jonesee959b02008-02-22 00:13:36 +01003594 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003595 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3596 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003597 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003598
3599 if (!capable(CAP_SYS_ADMIN))
3600 return -EACCES;
3601
3602 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003603 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3604 !strncmp(buf, "online", 6)) {
3605 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3606 spin_lock(&ioa_cfg->hrrq[i]._lock);
3607 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3608 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3609 }
3610 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003611 ioa_cfg->reset_retries = 0;
3612 ioa_cfg->in_ioa_bringdown = 0;
3613 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3614 }
3615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3616 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3617
3618 return result;
3619}
3620
Tony Jonesee959b02008-02-22 00:13:36 +01003621static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003622 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003623 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003624 .mode = S_IRUGO | S_IWUSR,
3625 },
3626 .show = ipr_show_adapter_state,
3627 .store = ipr_store_adapter_state
3628};
3629
3630/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003632 * @dev: device struct
3633 * @buf: buffer
3634 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003635 *
3636 * This function will reset the adapter.
3637 *
3638 * Return value:
3639 * count on success / other on failure
3640 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003641static ssize_t ipr_store_reset_adapter(struct device *dev,
3642 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 const char *buf, size_t count)
3644{
Tony Jonesee959b02008-02-22 00:13:36 +01003645 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3647 unsigned long lock_flags;
3648 int result = count;
3649
3650 if (!capable(CAP_SYS_ADMIN))
3651 return -EACCES;
3652
3653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3654 if (!ioa_cfg->in_reset_reload)
3655 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3656 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3657 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3658
3659 return result;
3660}
3661
Tony Jonesee959b02008-02-22 00:13:36 +01003662static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663 .attr = {
3664 .name = "reset_host",
3665 .mode = S_IWUSR,
3666 },
3667 .store = ipr_store_reset_adapter
3668};
3669
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003670static int ipr_iopoll(struct irq_poll *iop, int budget);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003671/**
3672 * ipr_show_iopoll_weight - Show ipr polling mode
3673 * @dev: class device struct
3674 * @buf: buffer
3675 *
3676 * Return value:
3677 * number of bytes printed to buffer
3678 **/
3679static ssize_t ipr_show_iopoll_weight(struct device *dev,
3680 struct device_attribute *attr, char *buf)
3681{
3682 struct Scsi_Host *shost = class_to_shost(dev);
3683 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3684 unsigned long lock_flags = 0;
3685 int len;
3686
3687 spin_lock_irqsave(shost->host_lock, lock_flags);
3688 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3689 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3690
3691 return len;
3692}
3693
3694/**
3695 * ipr_store_iopoll_weight - Change the adapter's polling mode
3696 * @dev: class device struct
3697 * @buf: buffer
3698 *
3699 * Return value:
3700 * number of bytes consumed on success / -EINVAL on failure
3701 **/
3702static ssize_t ipr_store_iopoll_weight(struct device *dev,
3703 struct device_attribute *attr,
3704 const char *buf, size_t count)
3705{
3706 struct Scsi_Host *shost = class_to_shost(dev);
3707 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3708 unsigned long user_iopoll_weight;
3709 unsigned long lock_flags = 0;
3710 int i;
3711
3712 if (!ioa_cfg->sis64) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003713 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003714 return -EINVAL;
3715 }
3716 if (kstrtoul(buf, 10, &user_iopoll_weight))
3717 return -EINVAL;
3718
3719 if (user_iopoll_weight > 256) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003720 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003721 return -EINVAL;
3722 }
3723
3724 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003725 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight is unchanged\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003726 return strlen(buf);
3727 }
3728
Jens Axboe89f8b332014-03-13 09:38:42 -06003729 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003730 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003731 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003732 }
3733
3734 spin_lock_irqsave(shost->host_lock, lock_flags);
3735 ioa_cfg->iopoll_weight = user_iopoll_weight;
Jens Axboe89f8b332014-03-13 09:38:42 -06003736 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003737 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003738 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003739 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003740 }
3741 }
3742 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3743
3744 return strlen(buf);
3745}
3746
3747static struct device_attribute ipr_iopoll_weight_attr = {
3748 .attr = {
3749 .name = "iopoll_weight",
3750 .mode = S_IRUGO | S_IWUSR,
3751 },
3752 .show = ipr_show_iopoll_weight,
3753 .store = ipr_store_iopoll_weight
3754};
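/*
 * Illustrative usage (sysfs path assumed, not taken from this file):
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 * switches a SIS-64 adapter with multiple HRRQs to irq_poll mode with a
 * weight of 64; writing 0 disables irq_poll again.
 */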
3755
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756/**
3757 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3758 * @buf_len: buffer length
3759 *
3760 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3761 * list to use for microcode download
3762 *
3763 * Return value:
3764 * pointer to sglist / NULL on failure
3765 **/
3766static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3767{
3768 int sg_size, order, bsize_elem, num_elem, i, j;
3769 struct ipr_sglist *sglist;
3770 struct scatterlist *scatterlist;
3771 struct page *page;
3772
3773 /* Get the minimum size per scatter/gather element */
3774 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3775
3776 /* Get the actual size per element */
3777 order = get_order(sg_size);
3778
3779 /* Determine the actual number of bytes per element */
3780 bsize_elem = PAGE_SIZE * (1 << order);
3781
3782 /* Determine the actual number of sg entries needed */
3783 if (buf_len % bsize_elem)
3784 num_elem = (buf_len / bsize_elem) + 1;
3785 else
3786 num_elem = buf_len / bsize_elem;
3787
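	/*
	 * Worked example (illustrative, assuming 4 KB pages and an
	 * IPR_MAX_SGLIST of 64): a 4 MB image gives sg_size = 66576,
	 * order = 5 (128 KB per element) and num_elem = 32.
	 */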
3788 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003789 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790 (sizeof(struct scatterlist) * (num_elem - 1)),
3791 GFP_KERNEL);
3792
3793 if (sglist == NULL) {
3794 ipr_trace;
3795 return NULL;
3796 }
3797
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798 scatterlist = sglist->scatterlist;
Jens Axboe45711f12007-10-22 21:19:53 +02003799 sg_init_table(scatterlist, num_elem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800
3801 sglist->order = order;
3802 sglist->num_sg = num_elem;
3803
3804 /* Allocate a bunch of sg elements */
3805 for (i = 0; i < num_elem; i++) {
3806 page = alloc_pages(GFP_KERNEL, order);
3807 if (!page) {
3808 ipr_trace;
3809
3810 /* Free up what we already allocated */
3811 for (j = i - 1; j >= 0; j--)
Jens Axboe45711f12007-10-22 21:19:53 +02003812 __free_pages(sg_page(&scatterlist[j]), order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813 kfree(sglist);
3814 return NULL;
3815 }
3816
Jens Axboe642f1492007-10-24 11:20:47 +02003817 sg_set_page(&scatterlist[i], page, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818 }
3819
3820 return sglist;
3821}
3822
3823/**
3824 * ipr_free_ucode_buffer - Frees a microcode download buffer
3825 * @sglist: scatter/gather list pointer
3826 *
3827 * Free a DMA'able ucode download buffer previously allocated with
3828 * ipr_alloc_ucode_buffer
3829 *
3830 * Return value:
3831 * nothing
3832 **/
3833static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3834{
3835 int i;
3836
3837 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003838 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839
3840 kfree(sglist);
3841}
3842
3843/**
3844 * ipr_copy_ucode_buffer - Copy a microcode image into the download buffer
3845 * @sglist: scatter/gather list pointer
3846 * @buffer: buffer pointer
3847 * @len: buffer length
3848 *
3849 * Copy a microcode image into the scatter/gather buffer allocated by
3850 * ipr_alloc_ucode_buffer
3851 *
3852 * Return value:
3853 * 0 on success / other on failure
3854 **/
3855static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3856 u8 *buffer, u32 len)
3857{
3858 int bsize_elem, i, result = 0;
3859 struct scatterlist *scatterlist;
3860 void *kaddr;
3861
3862 /* Determine the actual number of bytes per element */
3863 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3864
3865 scatterlist = sglist->scatterlist;
3866
3867 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003868 struct page *page = sg_page(&scatterlist[i]);
3869
3870 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003872 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873
3874 scatterlist[i].length = bsize_elem;
3875
3876 if (result != 0) {
3877 ipr_trace;
3878 return result;
3879 }
3880 }
3881
3882 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003883 struct page *page = sg_page(&scatterlist[i]);
3884
3885 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003887 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888
3889 scatterlist[i].length = len % bsize_elem;
3890 }
3891
3892 sglist->buffer_len = len;
3893 return result;
3894}
3895
3896/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003897 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3898 * @ipr_cmd: ipr command struct
3899 * @sglist: scatter/gather list
3900 *
3901 * Builds a microcode download IOA data list (IOADL).
3902 *
3903 **/
3904static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3905 struct ipr_sglist *sglist)
3906{
3907 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3908 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3909 struct scatterlist *scatterlist = sglist->scatterlist;
3910 int i;
3911
3912 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3913 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3914 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3915
3916 ioarcb->ioadl_len =
3917 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3918 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3919 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3920 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3921 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3922 }
3923
3924 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3925}
3926
3927/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003928 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003929 * @ipr_cmd: ipr command struct
3930 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003932 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003935static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3936 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003939 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940 struct scatterlist *scatterlist = sglist->scatterlist;
3941 int i;
3942
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003943 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003945 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3946
3947 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3949
3950 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3951 ioadl[i].flags_and_data_len =
3952 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3953 ioadl[i].address =
3954 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3955 }
3956
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003957 ioadl[i-1].flags_and_data_len |=
3958 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3959}
3960
3961/**
3962 * ipr_update_ioa_ucode - Update IOA's microcode
3963 * @ioa_cfg: ioa config struct
3964 * @sglist: scatter/gather list
3965 *
3966 * Initiate an adapter reset to update the IOA's microcode
3967 *
3968 * Return value:
3969 * 0 on success / -EIO on failure
3970 **/
3971static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3972 struct ipr_sglist *sglist)
3973{
3974 unsigned long lock_flags;
3975
3976 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003977 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003978 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3979 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3980 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3981 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003982
3983 if (ioa_cfg->ucode_sglist) {
3984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3985 dev_err(&ioa_cfg->pdev->dev,
3986 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003987 return -EIO;
3988 }
3989
Anton Blanchardd73341b2014-10-30 17:27:08 -05003990 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3991 sglist->scatterlist, sglist->num_sg,
3992 DMA_TO_DEVICE);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003993
3994 if (!sglist->num_dma_sg) {
3995 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3996 dev_err(&ioa_cfg->pdev->dev,
3997 "Failed to map microcode download buffer!\n");
3998 return -EIO;
3999 }
4000
4001 ioa_cfg->ucode_sglist = sglist;
4002 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4003 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4004 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4005
4006 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4007 ioa_cfg->ucode_sglist = NULL;
4008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009 return 0;
4010}
4011
4012/**
4013 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01004014 * @dev: device struct
4015 * @buf: buffer
4016 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017 *
4018 * This function will update the firmware on the adapter.
4019 *
4020 * Return value:
4021 * count on success / other on failure
4022 **/
Tony Jonesee959b02008-02-22 00:13:36 +01004023static ssize_t ipr_store_update_fw(struct device *dev,
4024 struct device_attribute *attr,
4025 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026{
Tony Jonesee959b02008-02-22 00:13:36 +01004027 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4029 struct ipr_ucode_image_header *image_hdr;
4030 const struct firmware *fw_entry;
4031 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032 char fname[100];
4033 char *src;
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004034 char *endline;
Insu Yund63c7dd2016-01-06 12:44:01 -05004035 int result, dnld_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036
4037 if (!capable(CAP_SYS_ADMIN))
4038 return -EACCES;
4039
Insu Yund63c7dd2016-01-06 12:44:01 -05004040 snprintf(fname, sizeof(fname), "%s", buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004042 endline = strchr(fname, '\n');
4043 if (endline)
4044 *endline = '\0';
4045
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004046 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4048 return -EIO;
4049 }
4050
4051 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4052
Linus Torvalds1da177e2005-04-16 15:20:36 -07004053 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4054 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4055 sglist = ipr_alloc_ucode_buffer(dnld_size);
4056
4057 if (!sglist) {
4058 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4059 release_firmware(fw_entry);
4060 return -ENOMEM;
4061 }
4062
4063 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4064
4065 if (result) {
4066 dev_err(&ioa_cfg->pdev->dev,
4067 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004068 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069 }
4070
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07004071 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4072
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004073 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004075 if (!result)
4076 result = count;
4077out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078 ipr_free_ucode_buffer(sglist);
4079 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004080 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081}
4082
Tony Jonesee959b02008-02-22 00:13:36 +01004083static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084 .attr = {
4085 .name = "update_fw",
4086 .mode = S_IWUSR,
4087 },
4088 .store = ipr_store_update_fw
4089};
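/*
 * Illustrative userspace sketch, not part of the driver: a firmware update is
 * requested by writing the microcode file name (relative to the firmware
 * search path, typically /lib/firmware) into the update_fw attribute. The
 * host number and image name below are placeholders.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *attr = "/sys/class/scsi_host/host0/update_fw";
 *		const char *image = "ibm-ipr-ucode.img";	// hypothetical file name
 *		int fd = open(attr, O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		// The write blocks until the download and the adapter reset finish.
 *		if (write(fd, image, strlen(image)) < 0)
 *			perror("write");
 *		close(fd);
 *		return 0;
 *	}
 */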
4090
Wayne Boyer75576bb2010-07-14 10:50:14 -07004091/**
4092 * ipr_show_fw_type - Show the adapter's firmware type.
4093 * @dev: class device struct
4094 * @buf: buffer
4095 *
4096 * Return value:
4097 * number of bytes printed to buffer
4098 **/
4099static ssize_t ipr_show_fw_type(struct device *dev,
4100 struct device_attribute *attr, char *buf)
4101{
4102 struct Scsi_Host *shost = class_to_shost(dev);
4103 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4104 unsigned long lock_flags = 0;
4105 int len;
4106
4107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4108 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4110 return len;
4111}
4112
4113static struct device_attribute ipr_ioa_fw_type_attr = {
4114 .attr = {
4115 .name = "fw_type",
4116 .mode = S_IRUGO,
4117 },
4118 .show = ipr_show_fw_type
4119};
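/*
 * Note: the value reported by fw_type mirrors ioa_cfg->sis64, so a read of
 * this attribute returns "1" on 64-bit SIS (next generation) adapters and
 * "0" on 32-bit SIS adapters.
 */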
4120
Brian Kingafc3f832016-08-24 12:56:51 -05004121static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4122 struct bin_attribute *bin_attr, char *buf,
4123 loff_t off, size_t count)
4124{
4125 struct device *cdev = container_of(kobj, struct device, kobj);
4126 struct Scsi_Host *shost = class_to_shost(cdev);
4127 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4128 struct ipr_hostrcb *hostrcb;
4129 unsigned long lock_flags = 0;
4130 int ret;
4131
4132 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4133 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4134 struct ipr_hostrcb, queue);
4135 if (!hostrcb) {
4136 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4137 return 0;
4138 }
4139 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4140 sizeof(hostrcb->hcam));
4141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4142 return ret;
4143}
4144
4145static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4146 struct bin_attribute *bin_attr, char *buf,
4147 loff_t off, size_t count)
4148{
4149 struct device *cdev = container_of(kobj, struct device, kobj);
4150 struct Scsi_Host *shost = class_to_shost(cdev);
4151 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4152 struct ipr_hostrcb *hostrcb;
4153 unsigned long lock_flags = 0;
4154
4155 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4156 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4157 struct ipr_hostrcb, queue);
4158 if (!hostrcb) {
4159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4160 return count;
4161 }
4162
4163 /* Reclaim hostrcb before exit */
4164 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4165 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4166 return count;
4167}
4168
4169static struct bin_attribute ipr_ioa_async_err_log = {
4170 .attr = {
4171 .name = "async_err_log",
4172 .mode = S_IRUGO | S_IWUSR,
4173 },
4174 .size = 0,
4175 .read = ipr_read_async_err_log,
4176 .write = ipr_next_async_err_log
4177};
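/*
 * Illustrative userspace sketch, not part of the driver: a read of
 * async_err_log returns the oldest queued HCAM error buffer, and any write to
 * the attribute retires that buffer so the next read sees the following
 * entry. The host number below is a placeholder.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *attr = "/sys/class/scsi_host/host0/async_err_log";
 *		char buf[65536];
 *		ssize_t n;
 *		int fd;
 *
 *		while ((fd = open(attr, O_RDWR)) >= 0) {
 *			n = read(fd, buf, sizeof(buf));
 *			if (n <= 0) {		// nothing queued
 *				close(fd);
 *				break;
 *			}
 *			// consume buf[0..n) here, then pop the entry
 *			write(fd, "1", 1);
 *			close(fd);
 *		}
 *		return 0;
 *	}
 */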
4178
Tony Jonesee959b02008-02-22 00:13:36 +01004179static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180 &ipr_fw_version_attr,
4181 &ipr_log_level_attr,
4182 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06004183 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004184 &ipr_ioa_reset_attr,
4185 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004186 &ipr_ioa_fw_type_attr,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06004187 &ipr_iopoll_weight_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 NULL,
4189};
4190
4191#ifdef CONFIG_SCSI_IPR_DUMP
4192/**
4193 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004194 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004196 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197 * @buf: buffer
4198 * @off: offset
4199 * @count: buffer size
4200 *
4201 * Return value:
4202 * number of bytes printed to buffer
4203 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004204static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004205 struct bin_attribute *bin_attr,
4206 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207{
Tony Jonesee959b02008-02-22 00:13:36 +01004208 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209 struct Scsi_Host *shost = class_to_shost(cdev);
4210 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4211 struct ipr_dump *dump;
4212 unsigned long lock_flags = 0;
4213 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004214 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 size_t rc = count;
4216
4217 if (!capable(CAP_SYS_ADMIN))
4218 return -EACCES;
4219
4220 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4221 dump = ioa_cfg->dump;
4222
4223 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4224 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4225 return 0;
4226 }
4227 kref_get(&dump->kref);
4228 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4229
4230 if (off > dump->driver_dump.hdr.len) {
4231 kref_put(&dump->kref, ipr_release_dump);
4232 return 0;
4233 }
4234
4235 if (off + count > dump->driver_dump.hdr.len) {
4236 count = dump->driver_dump.hdr.len - off;
4237 rc = count;
4238 }
4239
4240 if (count && off < sizeof(dump->driver_dump)) {
4241 if (off + count > sizeof(dump->driver_dump))
4242 len = sizeof(dump->driver_dump) - off;
4243 else
4244 len = count;
4245 src = (u8 *)&dump->driver_dump + off;
4246 memcpy(buf, src, len);
4247 buf += len;
4248 off += len;
4249 count -= len;
4250 }
4251
4252 off -= sizeof(dump->driver_dump);
4253
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004254 if (ioa_cfg->sis64)
4255 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4256 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4257 sizeof(struct ipr_sdt_entry));
4258 else
4259 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4260 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4261
4262 if (count && off < sdt_end) {
4263 if (off + count > sdt_end)
4264 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265 else
4266 len = count;
4267 src = (u8 *)&dump->ioa_dump + off;
4268 memcpy(buf, src, len);
4269 buf += len;
4270 off += len;
4271 count -= len;
4272 }
4273
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004274 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275
4276 while (count) {
4277 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4278 len = PAGE_ALIGN(off) - off;
4279 else
4280 len = count;
4281 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4282 src += off & ~PAGE_MASK;
4283 memcpy(buf, src, len);
4284 buf += len;
4285 off += len;
4286 count -= len;
4287 }
4288
4289 kref_put(&dump->kref, ipr_release_dump);
4290 return rc;
4291}
4292
4293/**
4294 * ipr_alloc_dump - Prepare for adapter dump
4295 * @ioa_cfg: ioa config struct
4296 *
4297 * Return value:
4298 * 0 on success / other on failure
4299 **/
4300static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4301{
4302 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004303 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304 unsigned long lock_flags = 0;
4305
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004306 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307
4308 if (!dump) {
4309 ipr_err("Dump memory allocation failed\n");
4310 return -ENOMEM;
4311 }
4312
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004313 if (ioa_cfg->sis64)
4314 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4315 else
4316 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4317
4318 if (!ioa_data) {
4319 ipr_err("Dump memory allocation failed\n");
4320 kfree(dump);
4321 return -ENOMEM;
4322 }
4323
4324 dump->ioa_dump.ioa_data = ioa_data;
4325
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326 kref_init(&dump->kref);
4327 dump->ioa_cfg = ioa_cfg;
4328
4329 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4330
4331 if (INACTIVE != ioa_cfg->sdt_state) {
4332 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004333 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334 kfree(dump);
4335 return 0;
4336 }
4337
4338 ioa_cfg->dump = dump;
4339 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004340 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341 ioa_cfg->dump_taken = 1;
4342 schedule_work(&ioa_cfg->work_q);
4343 }
4344 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4345
Linus Torvalds1da177e2005-04-16 15:20:36 -07004346 return 0;
4347}
4348
4349/**
4350 * ipr_free_dump - Free adapter dump memory
4351 * @ioa_cfg: ioa config struct
4352 *
4353 * Return value:
4354 * 0 on success / other on failure
4355 **/
4356static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4357{
4358 struct ipr_dump *dump;
4359 unsigned long lock_flags = 0;
4360
4361 ENTER;
4362
4363 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4364 dump = ioa_cfg->dump;
4365 if (!dump) {
4366 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4367 return 0;
4368 }
4369
4370 ioa_cfg->dump = NULL;
4371 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4372
4373 kref_put(&dump->kref, ipr_release_dump);
4374
4375 LEAVE;
4376 return 0;
4377}
4378
4379/**
4380 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004381 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004382 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004383 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384 * @buf: buffer
4385 * @off: offset
4386 * @count: buffer size
4387 *
4388 * Return value:
4389 * count on success / other on failure
4390 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004391static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004392 struct bin_attribute *bin_attr,
4393 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394{
Tony Jonesee959b02008-02-22 00:13:36 +01004395 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396 struct Scsi_Host *shost = class_to_shost(cdev);
4397 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4398 int rc;
4399
4400 if (!capable(CAP_SYS_ADMIN))
4401 return -EACCES;
4402
4403 if (buf[0] == '1')
4404 rc = ipr_alloc_dump(ioa_cfg);
4405 else if (buf[0] == '0')
4406 rc = ipr_free_dump(ioa_cfg);
4407 else
4408 return -EINVAL;
4409
4410 if (rc)
4411 return rc;
4412 else
4413 return count;
4414}
4415
4416static struct bin_attribute ipr_dump_attr = {
4417 .attr = {
4418 .name = "dump",
4419 .mode = S_IRUSR | S_IWUSR,
4420 },
4421 .size = 0,
4422 .read = ipr_read_dump,
4423 .write = ipr_write_dump
4424};
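/*
 * Illustrative userspace sketch, not part of the driver: the dump attribute
 * is driven by writing "1" to allocate the dump buffers (and kick off a dump
 * if the adapter is already dead), reading the dump data out once it has been
 * obtained, and writing "0" to release the buffers. The host number and
 * output file name are placeholders.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void pulse(const char *attr, const char *val)
 *	{
 *		int fd = open(attr, O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, val, 1);
 *			close(fd);
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		const char *attr = "/sys/class/scsi_host/host0/dump";
 *		char buf[65536];
 *		ssize_t n;
 *		int fd, out;
 *
 *		pulse(attr, "1");		// allocate/arm the dump buffers
 *		// ... wait for the adapter dump to be collected ...
 *		fd = open(attr, O_RDONLY);
 *		out = open("ipr_dump.bin", O_CREAT | O_WRONLY | O_TRUNC, 0600);
 *		if (fd >= 0 && out >= 0)
 *			// reads return 0 bytes until the dump is DUMP_OBTAINED
 *			while ((n = read(fd, buf, sizeof(buf))) > 0)
 *				write(out, buf, n);
 *		if (fd >= 0)
 *			close(fd);
 *		if (out >= 0)
 *			close(out);
 *		pulse(attr, "0");		// free the dump memory
 *		return 0;
 *	}
 */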
4425#else
4426static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4427#endif
4428
4429/**
4430 * ipr_change_queue_depth - Change the device's queue depth
4431 * @sdev: scsi device struct
4432 * @qdepth: depth to set
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434 *
4435 * Return value:
4436 * actual depth set
4437 **/
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004438static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004439{
Brian King35a39692006-09-25 12:39:20 -05004440 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4441 struct ipr_resource_entry *res;
4442 unsigned long lock_flags = 0;
4443
4444 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4445 res = (struct ipr_resource_entry *)sdev->hostdata;
4446
4447 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4448 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4449 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4450
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004451 scsi_change_queue_depth(sdev, qdepth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452 return sdev->queue_depth;
4453}
4454
4455/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4457 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004458 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459 * @buf: buffer
4460 *
4461 * Return value:
4462 * number of bytes printed to buffer
4463 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004464static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465{
4466 struct scsi_device *sdev = to_scsi_device(dev);
4467 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4468 struct ipr_resource_entry *res;
4469 unsigned long lock_flags = 0;
4470 ssize_t len = -ENXIO;
4471
4472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4473 res = (struct ipr_resource_entry *)sdev->hostdata;
4474 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004475 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4477 return len;
4478}
4479
4480static struct device_attribute ipr_adapter_handle_attr = {
4481 .attr = {
4482 .name = "adapter_handle",
4483 .mode = S_IRUSR,
4484 },
4485 .show = ipr_show_adapter_handle
4486};
4487
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004488/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004489 * ipr_show_resource_path - Show the resource path or the resource address for
4490 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004491 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004492 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004493 * @buf: buffer
4494 *
4495 * Return value:
4496 * number of bytes printed to buffer
4497 **/
4498static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4499{
4500 struct scsi_device *sdev = to_scsi_device(dev);
4501 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4502 struct ipr_resource_entry *res;
4503 unsigned long lock_flags = 0;
4504 ssize_t len = -ENXIO;
4505 char buffer[IPR_MAX_RES_PATH_LENGTH];
4506
4507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4508 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004509 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004510 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004511 __ipr_format_res_path(res->res_path, buffer,
4512 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004513 else if (res)
4514 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4515 res->bus, res->target, res->lun);
4516
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004517 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4518 return len;
4519}
4520
4521static struct device_attribute ipr_resource_path_attr = {
4522 .attr = {
4523 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004524 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004525 },
4526 .show = ipr_show_resource_path
4527};
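/*
 * Note: on SIS-64 adapters this attribute reports the device's hardware
 * resource path (a dash-separated list of hex path elements); on older
 * adapters it falls back to the SCSI address, e.g. "2:0:4:0" for host 2,
 * bus 0, target 4, lun 0.
 */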
4528
Wayne Boyer75576bb2010-07-14 10:50:14 -07004529/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004530 * ipr_show_device_id - Show the device_id for this device.
4531 * @dev: device struct
4532 * @attr: device attribute structure
4533 * @buf: buffer
4534 *
4535 * Return value:
4536 * number of bytes printed to buffer
4537 **/
4538static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4539{
4540 struct scsi_device *sdev = to_scsi_device(dev);
4541 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4542 struct ipr_resource_entry *res;
4543 unsigned long lock_flags = 0;
4544 ssize_t len = -ENXIO;
4545
4546 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4547 res = (struct ipr_resource_entry *)sdev->hostdata;
4548 if (res && ioa_cfg->sis64)
Wen Xiongbb8647e2015-06-11 20:45:18 -05004549 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
Wayne Boyer46d74562010-08-11 07:15:17 -07004550 else if (res)
4551 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4552
4553 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4554 return len;
4555}
4556
4557static struct device_attribute ipr_device_id_attr = {
4558 .attr = {
4559 .name = "device_id",
4560 .mode = S_IRUGO,
4561 },
4562 .show = ipr_show_device_id
4563};
4564
4565/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004566 * ipr_show_resource_type - Show the resource type for this device.
4567 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004568 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004569 * @buf: buffer
4570 *
4571 * Return value:
4572 * number of bytes printed to buffer
4573 **/
4574static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4575{
4576 struct scsi_device *sdev = to_scsi_device(dev);
4577 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4578 struct ipr_resource_entry *res;
4579 unsigned long lock_flags = 0;
4580 ssize_t len = -ENXIO;
4581
4582 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4583 res = (struct ipr_resource_entry *)sdev->hostdata;
4584
4585 if (res)
4586 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4587
4588 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4589 return len;
4590}
4591
4592static struct device_attribute ipr_resource_type_attr = {
4593 .attr = {
4594 .name = "resource_type",
4595 .mode = S_IRUGO,
4596 },
4597 .show = ipr_show_resource_type
4598};
4599
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004600/**
4601 * ipr_show_raw_mode - Show the device's raw mode setting
4602 * @dev: device struct
4603 * @buf: buffer
4604 *
4605 * Return value:
4606 * number of bytes printed to buffer
4607 **/
4608static ssize_t ipr_show_raw_mode(struct device *dev,
4609 struct device_attribute *attr, char *buf)
4610{
4611 struct scsi_device *sdev = to_scsi_device(dev);
4612 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4613 struct ipr_resource_entry *res;
4614 unsigned long lock_flags = 0;
4615 ssize_t len;
4616
4617 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4618 res = (struct ipr_resource_entry *)sdev->hostdata;
4619 if (res)
4620 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4621 else
4622 len = -ENXIO;
4623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4624 return len;
4625}
4626
4627/**
4628 * ipr_store_raw_mode - Change the device's raw mode
4629 * @dev: device struct
4630 * @buf: buffer
4631 *
4632 * Return value:
4633 * number of bytes consumed on success / other on failure
4634 **/
4635static ssize_t ipr_store_raw_mode(struct device *dev,
4636 struct device_attribute *attr,
4637 const char *buf, size_t count)
4638{
4639 struct scsi_device *sdev = to_scsi_device(dev);
4640 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4641 struct ipr_resource_entry *res;
4642 unsigned long lock_flags = 0;
4643 ssize_t len;
4644
4645 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4646 res = (struct ipr_resource_entry *)sdev->hostdata;
4647 if (res) {
Gabriel Krisman Bertazie35d7f272015-08-19 11:47:06 -03004648 if (ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004649 res->raw_mode = simple_strtoul(buf, NULL, 10);
4650 len = strlen(buf);
4651 if (res->sdev)
4652 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4653 res->raw_mode ? "enabled" : "disabled");
4654 } else
4655 len = -EINVAL;
4656 } else
4657 len = -ENXIO;
4658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4659 return len;
4660}
4661
4662static struct device_attribute ipr_raw_mode_attr = {
4663 .attr = {
4664 .name = "raw_mode",
4665 .mode = S_IRUGO | S_IWUSR,
4666 },
4667 .show = ipr_show_raw_mode,
4668 .store = ipr_store_raw_mode
4669};
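/*
 * Note: raw_mode is a per-device setting and may only be changed for
 * advanced function (AF) DASD resources; a write for any other resource type
 * fails with -EINVAL, and a write for a missing resource fails with -ENXIO.
 */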
4670
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671static struct device_attribute *ipr_dev_attrs[] = {
4672 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004673 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004674 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004675 &ipr_resource_type_attr,
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004676 &ipr_raw_mode_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677 NULL,
4678};
4679
4680/**
4681 * ipr_biosparam - Return the HSC mapping
4682 * @sdev: scsi device struct
4683 * @block_device: block device pointer
4684 * @capacity: capacity of the device
4685 * @parm: Array containing returned HSC values.
4686 *
4687 * This function generates the HSC parms that fdisk uses.
4688 * We want to make sure we return something that places partitions
4689 * on 4k boundaries for best performance with the IOA.
4690 *
4691 * Return value:
4692 * 0 on success
4693 **/
4694static int ipr_biosparam(struct scsi_device *sdev,
4695 struct block_device *block_device,
4696 sector_t capacity, int *parm)
4697{
4698 int heads, sectors;
4699 sector_t cylinders;
4700
4701 heads = 128;
4702 sectors = 32;
4703
4704 cylinders = capacity;
4705 sector_div(cylinders, (128 * 32));
4706
4707 /* return result */
4708 parm[0] = heads;
4709 parm[1] = sectors;
4710 parm[2] = cylinders;
4711
4712 return 0;
4713}
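/*
 * Worked example: with the fixed geometry of 128 heads and 32 sectors per
 * track, one cylinder spans 128 * 32 = 4096 sectors (2 MiB with 512-byte
 * sectors), so cylinder-aligned partitions land on 4 KiB boundaries. For a
 * device of 8388608 sectors (4 GiB), ipr_biosparam() reports:
 *
 *	parm[0] = 128;			// heads
 *	parm[1] = 32;			// sectors per track
 *	parm[2] = 8388608 / 4096;	// = 2048 cylinders
 */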
4714
4715/**
Brian King35a39692006-09-25 12:39:20 -05004716 * ipr_find_starget - Find target based on bus/target.
4717 * @starget: scsi target struct
4718 *
4719 * Return value:
4720 * resource entry pointer if found / NULL if not found
4721 **/
4722static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4723{
4724 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4725 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4726 struct ipr_resource_entry *res;
4727
4728 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004729 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004730 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004731 return res;
4732 }
4733 }
4734
4735 return NULL;
4736}
4737
4738static struct ata_port_info sata_port_info;
4739
4740/**
4741 * ipr_target_alloc - Prepare for commands to a SCSI target
4742 * @starget: scsi target struct
4743 *
4744 * If the device is a SATA device, this function allocates an
4745 * ATA port with libata, else it does nothing.
4746 *
4747 * Return value:
4748 * 0 on success / non-0 on failure
4749 **/
4750static int ipr_target_alloc(struct scsi_target *starget)
4751{
4752 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4753 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4754 struct ipr_sata_port *sata_port;
4755 struct ata_port *ap;
4756 struct ipr_resource_entry *res;
4757 unsigned long lock_flags;
4758
4759 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4760 res = ipr_find_starget(starget);
4761 starget->hostdata = NULL;
4762
4763 if (res && ipr_is_gata(res)) {
4764 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4765 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4766 if (!sata_port)
4767 return -ENOMEM;
4768
4769 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4770 if (ap) {
4771 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4772 sata_port->ioa_cfg = ioa_cfg;
4773 sata_port->ap = ap;
4774 sata_port->res = res;
4775
4776 res->sata_port = sata_port;
4777 ap->private_data = sata_port;
4778 starget->hostdata = sata_port;
4779 } else {
4780 kfree(sata_port);
4781 return -ENOMEM;
4782 }
4783 }
4784 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4785
4786 return 0;
4787}
4788
4789/**
4790 * ipr_target_destroy - Destroy a SCSI target
4791 * @starget: scsi target struct
4792 *
4793 * If the device was a SATA device, this function frees the libata
4794 * ATA port, else it does nothing.
4795 *
4796 **/
4797static void ipr_target_destroy(struct scsi_target *starget)
4798{
4799 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004800 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4801 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4802
4803 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004804 if (!ipr_find_starget(starget)) {
4805 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4806 clear_bit(starget->id, ioa_cfg->array_ids);
4807 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4808 clear_bit(starget->id, ioa_cfg->vset_ids);
4809 else if (starget->channel == 0)
4810 clear_bit(starget->id, ioa_cfg->target_ids);
4811 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004812 }
Brian King35a39692006-09-25 12:39:20 -05004813
4814 if (sata_port) {
4815 starget->hostdata = NULL;
4816 ata_sas_port_destroy(sata_port->ap);
4817 kfree(sata_port);
4818 }
4819}
4820
4821/**
4822 * ipr_find_sdev - Find device based on bus/target/lun.
4823 * @sdev: scsi device struct
4824 *
4825 * Return value:
4826 * resource entry pointer if found / NULL if not found
4827 **/
4828static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4829{
4830 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4831 struct ipr_resource_entry *res;
4832
4833 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004834 if ((res->bus == sdev->channel) &&
4835 (res->target == sdev->id) &&
4836 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004837 return res;
4838 }
4839
4840 return NULL;
4841}
4842
4843/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004844 * ipr_slave_destroy - Unconfigure a SCSI device
4845 * @sdev: scsi device struct
4846 *
4847 * Return value:
4848 * nothing
4849 **/
4850static void ipr_slave_destroy(struct scsi_device *sdev)
4851{
4852 struct ipr_resource_entry *res;
4853 struct ipr_ioa_cfg *ioa_cfg;
4854 unsigned long lock_flags = 0;
4855
4856 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4857
4858 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4859 res = (struct ipr_resource_entry *) sdev->hostdata;
4860 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004861 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004862 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863 sdev->hostdata = NULL;
4864 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004865 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866 }
4867 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4868}
4869
4870/**
4871 * ipr_slave_configure - Configure a SCSI device
4872 * @sdev: scsi device struct
4873 *
4874 * This function configures the specified scsi device.
4875 *
4876 * Return value:
4877 * 0 on success
4878 **/
4879static int ipr_slave_configure(struct scsi_device *sdev)
4880{
4881 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4882 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004883 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004885 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004886
4887 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4888 res = sdev->hostdata;
4889 if (res) {
4890 if (ipr_is_af_dasd_device(res))
4891 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004892 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004894 sdev->no_uld_attach = 1;
4895 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004896 if (ipr_is_vset_device(res)) {
Brian King60654e22014-12-02 12:47:46 -06004897 sdev->scsi_level = SCSI_SPC_3;
Jens Axboe242f9dc2008-09-14 05:55:09 -07004898 blk_queue_rq_timeout(sdev->request_queue,
4899 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004900 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004901 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004902 if (ipr_is_gata(res) && res->sata_port)
4903 ap = res->sata_port->ap;
4904 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4905
4906 if (ap) {
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004907 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004908 ata_sas_slave_configure(sdev, ap);
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01004909 }
4910
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004911 if (ioa_cfg->sis64)
4912 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004913 ipr_format_res_path(ioa_cfg,
4914 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004915 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004916 }
4917 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918 return 0;
4919}
4920
4921/**
Brian King35a39692006-09-25 12:39:20 -05004922 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4923 * @sdev: scsi device struct
4924 *
4925 * This function initializes an ATA port so that future commands
4926 * sent through queuecommand will work.
4927 *
4928 * Return value:
4929 * 0 on success
4930 **/
4931static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4932{
4933 struct ipr_sata_port *sata_port = NULL;
4934 int rc = -ENXIO;
4935
4936 ENTER;
4937 if (sdev->sdev_target)
4938 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07004939 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05004940 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07004941 if (rc == 0)
4942 rc = ata_sas_sync_probe(sata_port->ap);
4943 }
4944
Brian King35a39692006-09-25 12:39:20 -05004945 if (rc)
4946 ipr_slave_destroy(sdev);
4947
4948 LEAVE;
4949 return rc;
4950}
4951
4952/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953 * ipr_slave_alloc - Prepare for commands to a device.
4954 * @sdev: scsi device struct
4955 *
4956 * This function saves a pointer to the resource entry
4957 * in the scsi device struct if the device exists. We
4958 * can then use this pointer in ipr_queuecommand when
4959 * handling new commands.
4960 *
4961 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004962 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963 **/
4964static int ipr_slave_alloc(struct scsi_device *sdev)
4965{
4966 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4967 struct ipr_resource_entry *res;
4968 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004969 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004970
4971 sdev->hostdata = NULL;
4972
4973 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4974
Brian King35a39692006-09-25 12:39:20 -05004975 res = ipr_find_sdev(sdev);
4976 if (res) {
4977 res->sdev = sdev;
4978 res->add_to_ml = 0;
4979 res->in_erp = 0;
4980 sdev->hostdata = res;
4981 if (!ipr_is_naca_model(res))
4982 res->needs_sync_complete = 1;
4983 rc = 0;
4984 if (ipr_is_gata(res)) {
4985 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4986 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987 }
4988 }
4989
4990 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4991
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004992 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993}
4994
Brian King6cdb0812014-10-30 17:27:10 -05004995/**
4996 * ipr_match_lun - Match function for specified LUN
4997 * @ipr_cmd: ipr command struct
4998 * @device: device to match (sdev)
4999 *
5000 * Returns:
5001 * 1 if command matches sdev / 0 if command does not match sdev
5002 **/
5003static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5004{
5005 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5006 return 1;
5007 return 0;
5008}
5009
5010/**
5011 * ipr_wait_for_ops - Wait for matching commands to complete
5012 * @ioa_cfg: ioa config struct
5013 * @device: device to match (sdev)
5014 * @match: match function to use
5015 *
5016 * Returns:
5017 * SUCCESS / FAILED
5018 **/
5019static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5020 int (*match)(struct ipr_cmnd *, void *))
5021{
5022 struct ipr_cmnd *ipr_cmd;
5023 int wait;
5024 unsigned long flags;
5025 struct ipr_hrr_queue *hrrq;
5026 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5027 DECLARE_COMPLETION_ONSTACK(comp);
5028
5029 ENTER;
5030 do {
5031 wait = 0;
5032
5033 for_each_hrrq(hrrq, ioa_cfg) {
5034 spin_lock_irqsave(hrrq->lock, flags);
5035 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5036 if (match(ipr_cmd, device)) {
5037 ipr_cmd->eh_comp = &comp;
5038 wait++;
5039 }
5040 }
5041 spin_unlock_irqrestore(hrrq->lock, flags);
5042 }
5043
5044 if (wait) {
5045 timeout = wait_for_completion_timeout(&comp, timeout);
5046
5047 if (!timeout) {
5048 wait = 0;
5049
5050 for_each_hrrq(hrrq, ioa_cfg) {
5051 spin_lock_irqsave(hrrq->lock, flags);
5052 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5053 if (match(ipr_cmd, device)) {
5054 ipr_cmd->eh_comp = NULL;
5055 wait++;
5056 }
5057 }
5058 spin_unlock_irqrestore(hrrq->lock, flags);
5059 }
5060
5061 if (wait)
5062 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5063 LEAVE;
5064 return wait ? FAILED : SUCCESS;
5065 }
5066 }
5067 } while (wait);
5068
5069 LEAVE;
5070 return SUCCESS;
5071}
5072
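/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd: scsi command struct
 *
 * Return value:
 * SUCCESS / FAILED
 **/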
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005073static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074{
5075 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005076 unsigned long lock_flags = 0;
5077 int rc = SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078
5079 ENTER;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005080 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5081 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005082
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05005083 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005084 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005085 dev_err(&ioa_cfg->pdev->dev,
5086 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005088 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5089 ioa_cfg->sdt_state = GET_DUMP;
5090 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005092 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5093 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5094 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005096 /* If we got hit with a host reset while we were already resetting
5097 the adapter for some reason, and the reset failed, fail the host reset too. */
5098 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5099 ipr_trace;
5100 rc = FAILED;
5101 }
5102
5103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104 LEAVE;
5105 return rc;
5106}
5107
5108/**
Brian Kingc6513092006-03-29 09:37:43 -06005109 * ipr_device_reset - Reset the device
5110 * @ioa_cfg: ioa config struct
5111 * @res: resource entry struct
5112 *
5113 * This function issues a device reset to the affected device.
5114 * If the device is a SCSI device, a LUN reset will be sent
5115 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05005116 * will be sent. If the device is a SATA device, a PHY reset will
5117 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06005118 *
5119 * Return value:
5120 * 0 on success / non-zero on failure
5121 **/
5122static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5123 struct ipr_resource_entry *res)
5124{
5125 struct ipr_cmnd *ipr_cmd;
5126 struct ipr_ioarcb *ioarcb;
5127 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05005128 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06005129 u32 ioasc;
5130
5131 ENTER;
5132 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5133 ioarcb = &ipr_cmd->ioarcb;
5134 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08005135
5136 if (ipr_cmd->ioa_cfg->sis64) {
5137 regs = &ipr_cmd->i.ata_ioadl.regs;
5138 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5139 } else
5140 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06005141
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005142 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06005143 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5144 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05005145 if (ipr_is_gata(res)) {
5146 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08005147 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05005148 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5149 }
Brian Kingc6513092006-03-29 09:37:43 -06005150
5151 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005152 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005153 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005154 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5155 if (ipr_cmd->ioa_cfg->sis64)
5156 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5157 sizeof(struct ipr_ioasa_gata));
5158 else
5159 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5160 sizeof(struct ipr_ioasa_gata));
5161 }
Brian Kingc6513092006-03-29 09:37:43 -06005162
5163 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005164 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06005165}
5166
5167/**
Brian King35a39692006-09-25 12:39:20 -05005168 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09005169 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05005170 * @classes: class of the attached device
5171 *
Tejun Heocc0680a2007-08-06 18:36:23 +09005172 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05005173 *
5174 * Return value:
5175 * 0 on success / non-zero on failure
5176 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09005177static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07005178 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05005179{
Tejun Heocc0680a2007-08-06 18:36:23 +09005180 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05005181 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5182 struct ipr_resource_entry *res;
5183 unsigned long lock_flags = 0;
5184 int rc = -ENXIO;
5185
5186 ENTER;
5187 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005188 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06005189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5190 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5191 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5192 }
5193
Brian King35a39692006-09-25 12:39:20 -05005194 res = sata_port->res;
5195 if (res) {
5196 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005197 *classes = res->ata_class;
Brian King35a39692006-09-25 12:39:20 -05005198 }
5199
5200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5201 LEAVE;
5202 return rc;
5203}
5204
5205/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206 * __ipr_eh_dev_reset - Reset the device
5207 * @scsi_cmd: scsi command struct
5208 *
5209 * This function issues a device reset to the affected device.
5210 * A LUN reset will be sent to the device first. If that does
5211 * not work, a target reset will be sent.
5212 *
5213 * Return value:
5214 * SUCCESS / FAILED
5215 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005216static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217{
5218 struct ipr_cmnd *ipr_cmd;
5219 struct ipr_ioa_cfg *ioa_cfg;
5220 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05005221 struct ata_port *ap;
5222 int rc = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005223 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005224
5225 ENTER;
5226 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5227 res = scsi_cmd->device->hostdata;
5228
brking@us.ibm.comeeb883072005-11-01 17:02:29 -06005229 if (!res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 return FAILED;
5231
5232 /*
5233 * If we are currently going through reset/reload, return failed. This will force the
5234 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5235 * reset to complete
5236 */
5237 if (ioa_cfg->in_reset_reload)
5238 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005239 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005240 return FAILED;
5241
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005242 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005243 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005244 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5245 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5246 if (ipr_cmd->scsi_cmd)
5247 ipr_cmd->done = ipr_scsi_eh_done;
5248 if (ipr_cmd->qc)
5249 ipr_cmd->done = ipr_sata_eh_done;
5250 if (ipr_cmd->qc &&
5251 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5252 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5253 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5254 }
Brian King7402ece2006-11-21 10:28:23 -06005255 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005257 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005260 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05005261
5262 if (ipr_is_gata(res) && res->sata_port) {
5263 ap = res->sata_port->ap;
5264 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09005265 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05005266 spin_lock_irq(scsi_cmd->device->host->host_lock);
Brian King5af23d22007-05-09 15:36:35 -05005267
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005268 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005269 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005270 list_for_each_entry(ipr_cmd,
5271 &hrrq->hrrq_pending_q, queue) {
5272 if (ipr_cmd->ioarcb.res_handle ==
5273 res->res_handle) {
5274 rc = -EIO;
5275 break;
5276 }
Brian King5af23d22007-05-09 15:36:35 -05005277 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005278 spin_unlock(&hrrq->_lock);
Brian King5af23d22007-05-09 15:36:35 -05005279 }
Brian King35a39692006-09-25 12:39:20 -05005280 } else
5281 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06005283 res->reset_occurred = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005284
Linus Torvalds1da177e2005-04-16 15:20:36 -07005285 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005286 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005287}
5288
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005289static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005290{
5291 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005292 struct ipr_ioa_cfg *ioa_cfg;
5293
5294 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005295
5296 spin_lock_irq(cmd->device->host->host_lock);
5297 rc = __ipr_eh_dev_reset(cmd);
5298 spin_unlock_irq(cmd->device->host->host_lock);
5299
Brian King6cdb0812014-10-30 17:27:10 -05005300 if (rc == SUCCESS)
5301 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5302
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005303 return rc;
5304}
5305
Linus Torvalds1da177e2005-04-16 15:20:36 -07005306/**
5307 * ipr_bus_reset_done - Op done function for bus reset.
5308 * @ipr_cmd: ipr command struct
5309 *
5310 * This function is the op done function for a bus reset
5311 *
5312 * Return value:
5313 * none
5314 **/
5315static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5316{
5317 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5318 struct ipr_resource_entry *res;
5319
5320 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005321 if (!ioa_cfg->sis64)
5322 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5323 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5324 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5325 break;
5326 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328
5329 /*
5330 * If abort has not completed, indicate the reset has, else call the
5331 * abort's done function to wake the sleeping eh thread
5332 */
5333 if (ipr_cmd->sibling->sibling)
5334 ipr_cmd->sibling->sibling = NULL;
5335 else
5336 ipr_cmd->sibling->done(ipr_cmd->sibling);
5337
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005338 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005339 LEAVE;
5340}
5341
5342/**
5343 * ipr_abort_timeout - An abort task has timed out
5344 * @ipr_cmd: ipr command struct
5345 *
5346 * This function handles when an abort task times out. If this
5347 * happens we issue a bus reset since we have resources tied
5348 * up that must be freed before returning to the midlayer.
5349 *
5350 * Return value:
5351 * none
5352 **/
5353static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5354{
5355 struct ipr_cmnd *reset_cmd;
5356 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5357 struct ipr_cmd_pkt *cmd_pkt;
5358 unsigned long lock_flags = 0;
5359
5360 ENTER;
5361 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5362 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5363 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5364 return;
5365 }
5366
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005367 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5369 ipr_cmd->sibling = reset_cmd;
5370 reset_cmd->sibling = ipr_cmd;
5371 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5372 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5373 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5374 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5375 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5376
5377 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5378 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5379 LEAVE;
5380}
5381
5382/**
5383 * ipr_cancel_op - Cancel specified op
5384 * @scsi_cmd: scsi command struct
5385 *
5386 * This function cancels specified op.
5387 *
5388 * Return value:
5389 * SUCCESS / FAILED
5390 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005391static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392{
5393 struct ipr_cmnd *ipr_cmd;
5394 struct ipr_ioa_cfg *ioa_cfg;
5395 struct ipr_resource_entry *res;
5396 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005397 u32 ioasc, int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005398 int op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005399 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005400
5401 ENTER;
5402 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5403 res = scsi_cmd->device->hostdata;
5404
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005405 /* If we are currently going through reset/reload, return failed.
5406 * This will force the mid-layer to call ipr_eh_host_reset,
5407 * which will then go to sleep and wait for the reset to complete
5408 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005409 if (ioa_cfg->in_reset_reload ||
5410 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005411 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005412 if (!res)
5413 return FAILED;
5414
5415 /*
5416 * If we are aborting a timed out op, chances are that the timeout was caused
5417 * by a still not detected EEH error. In such cases, reading a register will
5418 * trigger the EEH recovery infrastructure.
5419 */
5420 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5421
5422 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423 return FAILED;
5424
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005425 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005426 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005427 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5428 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5429 ipr_cmd->done = ipr_scsi_eh_done;
5430 op_found = 1;
5431 break;
5432 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005434 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435 }
5436
5437 if (!op_found)
5438 return SUCCESS;
5439
5440 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005441 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5443 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5444 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5445 ipr_cmd->u.sdev = scsi_cmd->device;
5446
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005447 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5448 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005449 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005450 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005451
5452 /*
5453 * If the abort task timed out and we sent a bus reset, we will get
5454	 * one of the following responses to the abort
5455 */
5456 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5457 ioasc = 0;
5458 ipr_trace;
5459 }
5460
Kleber Sacilotto de Souzac4ee22a2013-03-14 13:52:23 -05005461 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005462 if (!ipr_is_naca_model(res))
5463 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005464
5465 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005466 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005467}
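/*
 * Note that not finding the op on any pending queue is reported as
 * SUCCESS: the command has already completed (or is about to), so there
 * is nothing left to abort.  When the op is found, a CANCEL ALL REQUESTS
 * is sent to the device and the outcome is judged from the returned
 * IOASC; a BUS_WAS_RESET or SYNC_REQUIRED response is expected here if
 * ipr_abort_timeout() had to resort to a bus reset in the meantime.
 */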
5468
5469/**
5470 * ipr_scan_finished - Report whether the device scan has completed
5471 * @shost: scsi host struct
 * @elapsed_time: elapsed time of the scan in jiffies
5472 *
5473 * Return value:
Brian Kingf688f962014-12-02 12:47:37 -06005474 * 0 if scan in progress / 1 if scan is complete
5475 **/
5476static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5477{
5478 unsigned long lock_flags;
5479 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5480 int rc = 0;
5481
5482 spin_lock_irqsave(shost->host_lock, lock_flags);
5483 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5484 rc = 1;
5485 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5486 rc = 1;
5487 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5488 return rc;
5489}
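/*
 * ipr_scan_finished is wired up as the ->scan_finished hook in the
 * scsi_host_template below, so the midlayer polls it from scsi_scan_host()
 * until it returns nonzero.  Roughly, the scan is declared finished once
 * the adapter reports its configuration complete (scan_done), the adapter
 * is dead, or about twice the transition-to-operational timeout has
 * elapsed.
 */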
5490
5491/**
5492 * ipr_eh_abort - Abort a single op
5493 * @scsi_cmd: scsi command struct
5494 *
5495 * Return value:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005496 * SUCCESS / FAILED
5497 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005498static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005499{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005500 unsigned long flags;
5501 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005502 struct ipr_ioa_cfg *ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005503
5504 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005505
Brian King6cdb0812014-10-30 17:27:10 -05005506 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5507
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005508 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5509 rc = ipr_cancel_op(scsi_cmd);
5510 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005511
Brian King6cdb0812014-10-30 17:27:10 -05005512 if (rc == SUCCESS)
5513 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005514 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005515 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005516}
5517
5518/**
5519 * ipr_handle_other_interrupt - Handle "other" interrupts
5520 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005521 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005522 *
5523 * Return value:
5524 * IRQ_NONE / IRQ_HANDLED
5525 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005526static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005527 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005528{
5529 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005530 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005531
Wayne Boyer7dacb642011-04-12 10:29:02 -07005532 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5533 int_reg &= ~int_mask_reg;
5534
5535 /* If an interrupt on the adapter did not occur, ignore it.
5536 * Or in the case of SIS 64, check for a stage change interrupt.
5537 */
5538 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5539 if (ioa_cfg->sis64) {
5540 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5541 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5542 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5543
5544 /* clear stage change */
5545 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5546 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5547 list_del(&ioa_cfg->reset_cmd->queue);
5548 del_timer(&ioa_cfg->reset_cmd->timer);
5549 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5550 return IRQ_HANDLED;
5551 }
5552 }
5553
5554 return IRQ_NONE;
5555 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005556
5557 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5558 /* Mask the interrupt */
5559 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005560 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5561
5562 list_del(&ioa_cfg->reset_cmd->queue);
5563 del_timer(&ioa_cfg->reset_cmd->timer);
5564 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005565 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005566 if (ioa_cfg->clear_isr) {
5567 if (ipr_debug && printk_ratelimit())
5568 dev_err(&ioa_cfg->pdev->dev,
5569 "Spurious interrupt detected. 0x%08X\n", int_reg);
5570 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5571 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5572 return IRQ_NONE;
5573 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005574 } else {
5575 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5576 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005577 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5578 dev_err(&ioa_cfg->pdev->dev,
5579 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005580 else
5581 dev_err(&ioa_cfg->pdev->dev,
5582 "Permanent IOA failure. 0x%08X\n", int_reg);
5583
5584 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5585 ioa_cfg->sdt_state = GET_DUMP;
5586
5587 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5588 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5589 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005590
Linus Torvalds1da177e2005-04-16 15:20:36 -07005591 return rc;
5592}
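/*
 * Roughly, the "other" interrupt cases break down as follows: a
 * transition-to-operational (or, on SIS-64, an IPL stage change)
 * interrupt resumes the in-progress adapter reset job; an HRRQ-updated
 * interrupt that carries no new responses is logged as spurious and
 * cleared when clear_isr is set; anything else (unit check, missing host
 * RRQ, permanent IOA failure) is logged and answered with a full adapter
 * reset, capturing a dump first if one was requested.
 */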
5593
5594/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005595 * ipr_isr_eh - Interrupt service routine error handler
5596 * @ioa_cfg: ioa config struct
5597 * @msg: message to log
 * @number: value logged after the message
5598 *
5599 * Return value:
5600 * none
5601 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005602static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005603{
5604 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005605 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005606
5607 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5608 ioa_cfg->sdt_state = GET_DUMP;
5609
5610 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5611}
5612
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005613static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005614 struct list_head *doneq)
5615{
5616 u32 ioasc;
5617 u16 cmd_index;
5618 struct ipr_cmnd *ipr_cmd;
5619 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5620 int num_hrrq = 0;
5621
5622 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005623 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005624 return 0;
5625
5626 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5627 hrr_queue->toggle_bit) {
5628
5629 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5630 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5631 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5632
5633 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5634 cmd_index < hrr_queue->min_cmd_id)) {
5635 ipr_isr_eh(ioa_cfg,
5636 "Invalid response handle from IOA: ",
5637 cmd_index);
5638 break;
5639 }
5640
5641 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5642 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5643
5644 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5645
5646 list_move_tail(&ipr_cmd->queue, doneq);
5647
5648 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5649 hrr_queue->hrrq_curr++;
5650 } else {
5651 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5652 hrr_queue->toggle_bit ^= 1u;
5653 }
5654 num_hrrq++;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005655 if (budget > 0 && num_hrrq >= budget)
5656 break;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005657 }
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005658
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005659 return num_hrrq;
5660}
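/*
 * In outline, the HRRQ is a ring of 32-bit response words written by the
 * adapter.  Each word encodes the index of the completed command plus a
 * toggle bit, and an entry is considered new only while its toggle bit
 * matches hrr_queue->toggle_bit, which the host flips each time hrrq_curr
 * wraps from hrrq_end back to hrrq_start.  A negative budget, as used by
 * the hard-irq path, means "drain everything currently available".
 */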
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005661
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005662static int ipr_iopoll(struct irq_poll *iop, int budget)
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005663{
5664 struct ipr_ioa_cfg *ioa_cfg;
5665 struct ipr_hrr_queue *hrrq;
5666 struct ipr_cmnd *ipr_cmd, *temp;
5667 unsigned long hrrq_flags;
5668 int completed_ops;
5669 LIST_HEAD(doneq);
5670
5671 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5672 ioa_cfg = hrrq->ioa_cfg;
5673
5674 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5675 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5676
5677 if (completed_ops < budget)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005678 irq_poll_complete(iop);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005679 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5680
5681 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5682 list_del(&ipr_cmd->queue);
5683 del_timer(&ipr_cmd->timer);
5684 ipr_cmd->fast_done(ipr_cmd);
5685 }
5686
5687 return completed_ops;
5688}
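/*
 * ipr_iopoll follows the usual irq_poll contract (NAPI-style polling from
 * softirq context): it completes at most 'budget' commands per invocation
 * and calls irq_poll_complete() only when it consumes less than the
 * budget, which switches the queue back to interrupt-driven operation.
 * It is scheduled from ipr_isr_mhrrq() via irq_poll_sched() when
 * iopoll_weight is enabled on SIS-64 adapters with multiple vectors.
 */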
5689
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005690/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005691 * ipr_isr - Interrupt service routine
5692 * @irq: irq number
5693 * @devp: pointer to ipr_hrr_queue struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005694 *
5695 * Return value:
5696 * IRQ_NONE / IRQ_HANDLED
5697 **/
David Howells7d12e782006-10-05 14:55:46 +01005698static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005699{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005700 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5701 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005702 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005703 u32 int_reg = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005704 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005705 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005706 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005707 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005708 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005709
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005710 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005712 if (!hrrq->allow_interrupts) {
5713 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005714 return IRQ_NONE;
5715 }
5716
Linus Torvalds1da177e2005-04-16 15:20:36 -07005717 while (1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005718 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5719 rc = IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005720
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005721 if (!ioa_cfg->clear_isr)
5722 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005723
Linus Torvalds1da177e2005-04-16 15:20:36 -07005724 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005725 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005726 do {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005727 writel(IPR_PCII_HRRQ_UPDATED,
5728 ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005729 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005730 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005731 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005732
Wayne Boyer7dacb642011-04-12 10:29:02 -07005733 } else if (rc == IRQ_NONE && irq_none == 0) {
5734 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5735 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005736 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5737 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005738 ipr_isr_eh(ioa_cfg,
5739 "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005740 rc = IRQ_HANDLED;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005741 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005742 } else
5743 break;
5744 }
5745
5746 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005747 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005748
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005749 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005750 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5751 list_del(&ipr_cmd->queue);
5752 del_timer(&ipr_cmd->timer);
5753 ipr_cmd->fast_done(ipr_cmd);
5754 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005755 return rc;
5756}
Brian King172cd6e2012-07-17 08:14:40 -05005757
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005758/**
5759 * ipr_isr_mhrrq - Interrupt service routine
5760 * @irq: irq number
5761 * @devp: pointer to ipr_hrr_queue struct
5762 *
5763 * Return value:
5764 * IRQ_NONE / IRQ_HANDLED
5765 **/
5766static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5767{
5768 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005769 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005770 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005771 struct ipr_cmnd *ipr_cmd, *temp;
5772 irqreturn_t rc = IRQ_NONE;
5773 LIST_HEAD(doneq);
5774
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005775 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005776
5777 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005778 if (!hrrq->allow_interrupts) {
5779 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005780 return IRQ_NONE;
5781 }
5782
Jens Axboe89f8b332014-03-13 09:38:42 -06005783 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005784 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5785 hrrq->toggle_bit) {
Christoph Hellwigea511902015-12-07 06:41:11 -08005786 irq_poll_sched(&hrrq->iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005787 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5788 return IRQ_HANDLED;
5789 }
5790 } else {
5791 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5792 hrrq->toggle_bit)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005793
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005794 if (ipr_process_hrrq(hrrq, -1, &doneq))
5795 rc = IRQ_HANDLED;
5796 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005797
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005798 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005799
5800 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5801 list_del(&ipr_cmd->queue);
5802 del_timer(&ipr_cmd->timer);
5803 ipr_cmd->fast_done(ipr_cmd);
5804 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005805 return rc;
5806}
5807
5808/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005809 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005810 * @ioa_cfg: ioa config struct
5811 * @ipr_cmd: ipr command struct
5812 *
5813 * Return value:
5814 * 0 on success / -1 on failure
5815 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005816static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5817 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005818{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005819 int i, nseg;
5820 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005821 u32 length;
5822 u32 ioadl_flags = 0;
5823 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5824 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005825 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005826
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005827 length = scsi_bufflen(scsi_cmd);
5828 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829 return 0;
5830
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005831 nseg = scsi_dma_map(scsi_cmd);
5832 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005833 if (printk_ratelimit())
Anton Blanchardd73341b2014-10-30 17:27:08 -05005834 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005835 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005836 }
5837
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005838 ipr_cmd->dma_use_sg = nseg;
5839
Wayne Boyer438b0332010-05-10 09:13:00 -07005840 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005841 ioarcb->ioadl_len =
5842 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005843
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005844 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5845 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5846 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005847 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5848 ioadl_flags = IPR_IOADL_FLAGS_READ;
5849
5850 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5851 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5852 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5853 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5854 }
5855
5856 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5857 return 0;
5858}
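/*
 * Loosely, the IOADL is the adapter's scatter/gather descriptor list: on
 * SIS-64 each ipr_ioadl64_desc carries read/write flags, the segment
 * length, and a 64-bit DMA address, and the final descriptor is tagged
 * with IPR_IOADL_FLAGS_LAST so the adapter knows where the list ends.
 * The IOARCB records only the total transfer length and the byte size of
 * the list; the descriptors themselves live in the command block and are
 * reached through the data_ioadl_addr programmed at command setup time.
 */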
5859
5860/**
5861 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5862 * @ioa_cfg: ioa config struct
5863 * @ipr_cmd: ipr command struct
5864 *
5865 * Return value:
5866 * 0 on success / -1 on failure
5867 **/
5868static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5869 struct ipr_cmnd *ipr_cmd)
5870{
5871 int i, nseg;
5872 struct scatterlist *sg;
5873 u32 length;
5874 u32 ioadl_flags = 0;
5875 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5876 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5877 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5878
5879 length = scsi_bufflen(scsi_cmd);
5880 if (!length)
5881 return 0;
5882
5883 nseg = scsi_dma_map(scsi_cmd);
5884 if (nseg < 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05005885 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
Wayne Boyera32c0552010-02-19 13:23:36 -08005886 return -1;
5887 }
5888
5889 ipr_cmd->dma_use_sg = nseg;
5890
5891 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5892 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5893 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5894 ioarcb->data_transfer_length = cpu_to_be32(length);
5895 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005896 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5897 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5898 ioadl_flags = IPR_IOADL_FLAGS_READ;
5899 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5900 ioarcb->read_ioadl_len =
5901 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5902 }
5903
Wayne Boyera32c0552010-02-19 13:23:36 -08005904 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5905 ioadl = ioarcb->u.add_data.u.ioadl;
5906 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5907 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005908 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5909 }
5910
5911 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5912 ioadl[i].flags_and_data_len =
5913 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5914 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5915 }
5916
5917 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5918 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005919}
5920
5921/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005922 * ipr_erp_done - Process completion of ERP for a device
5923 * @ipr_cmd: ipr command struct
5924 *
5925 * This function copies the sense buffer into the scsi_cmd
5926 * struct and pushes the scsi_done function.
5927 *
5928 * Return value:
5929 * nothing
5930 **/
5931static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5932{
5933 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5934 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005935 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005936
5937 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5938 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005939 scmd_printk(KERN_ERR, scsi_cmd,
5940 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005941 } else {
5942 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5943 SCSI_SENSE_BUFFERSIZE);
5944 }
5945
5946 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005947 if (!ipr_is_naca_model(res))
5948 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949 res->in_erp = 0;
5950 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005951 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005952 scsi_cmd->scsi_done(scsi_cmd);
Brian King2f1b0a82017-03-15 16:58:36 -05005953 if (ipr_cmd->eh_comp)
5954 complete(ipr_cmd->eh_comp);
5955 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005956}
5957
5958/**
5959 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5960 * @ipr_cmd: ipr command struct
5961 *
5962 * Return value:
5963 * none
5964 **/
5965static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5966{
Brian King51b1c7e2007-03-29 12:43:50 -05005967 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005968 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08005969 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005970
5971 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08005972 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005973 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005974 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005975 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005976 ioasa->hdr.ioasc = 0;
5977 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005978
5979 if (ipr_cmd->ioa_cfg->sis64)
5980 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5981 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5982 else {
5983 ioarcb->write_ioadl_addr =
5984 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5985 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5986 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005987}
5988
5989/**
5990 * ipr_erp_request_sense - Send request sense to a device
5991 * @ipr_cmd: ipr command struct
5992 *
5993 * This function sends a request sense to a device as a result
5994 * of a check condition.
5995 *
5996 * Return value:
5997 * nothing
5998 **/
5999static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6000{
6001 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006002 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006003
6004 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6005 ipr_erp_done(ipr_cmd);
6006 return;
6007 }
6008
6009 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6010
6011 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6012 cmd_pkt->cdb[0] = REQUEST_SENSE;
6013 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6014 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6015 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6016 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6017
Wayne Boyera32c0552010-02-19 13:23:36 -08006018 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6019 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006020
6021 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6022 IPR_REQUEST_SENSE_TIMEOUT * 2);
6023}
6024
6025/**
6026 * ipr_erp_cancel_all - Send cancel all to a device
6027 * @ipr_cmd: ipr command struct
6028 *
6029 * This function sends a cancel all to a device to clear the
6030 * queue. If we are running TCQ on the device, QERR is set to 1,
6031 * which means all outstanding ops have been dropped on the floor.
6032 * Cancel all will return them to us.
6033 *
6034 * Return value:
6035 * nothing
6036 **/
6037static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6038{
6039 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6040 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6041 struct ipr_cmd_pkt *cmd_pkt;
6042
6043 res->in_erp = 1;
6044
6045 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6046
Christoph Hellwig17ea0122014-11-24 15:36:20 +01006047 if (!scsi_cmd->device->simple_tags) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006048 ipr_erp_request_sense(ipr_cmd);
6049 return;
6050 }
6051
6052 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6053 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6054 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6055
6056 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6057 IPR_CANCEL_ALL_TIMEOUT);
6058}
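/*
 * The ERP path for a failed GPDD command is, in effect, a short chain of
 * adapter commands: ipr_erp_start() decides whether recovery is needed,
 * ipr_erp_cancel_all() flushes the device queue when tagged queuing is in
 * use, ipr_erp_request_sense() fetches sense data if autosense was not
 * supplied, and ipr_erp_done() finally copies the sense buffer back and
 * completes the original scsi_cmnd.
 */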
6059
6060/**
6061 * ipr_dump_ioasa - Dump contents of IOASA
6062 * @ioa_cfg: ioa config struct
6063 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06006064 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006065 *
6066 * This function is invoked by the interrupt handler when ops
6067 * fail. It will log the IOASA if appropriate. Only called
6068 * for GPDD ops.
6069 *
6070 * Return value:
6071 * none
6072 **/
6073static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06006074 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006075{
6076 int i;
6077 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05006078 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006079 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006080 __be32 *ioasa_data = (__be32 *)ioasa;
6081 int error_index;
6082
Wayne Boyer96d21f02010-05-10 09:13:27 -07006083 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6084 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006085
6086 if (0 == ioasc)
6087 return;
6088
6089 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6090 return;
6091
Brian Kingb0692dd2007-03-29 12:43:09 -05006092 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6093 error_index = ipr_get_error(fd_ioasc);
6094 else
6095 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006096
6097 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6098 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07006099 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006100 return;
6101
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006102 if (!ipr_is_gscsi(res))
6103 return;
6104
Linus Torvalds1da177e2005-04-16 15:20:36 -07006105 if (ipr_error_table[error_index].log_ioasa == 0)
6106 return;
6107 }
6108
Brian Kingfe964d02006-03-29 09:37:29 -06006109 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006110
Wayne Boyer96d21f02010-05-10 09:13:27 -07006111 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6112 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6113 data_len = sizeof(struct ipr_ioasa64);
6114 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006115 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006116
6117 ipr_err("IOASA Dump:\n");
6118
6119 for (i = 0; i < data_len / 4; i += 4) {
6120 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6121 be32_to_cpu(ioasa_data[i]),
6122 be32_to_cpu(ioasa_data[i+1]),
6123 be32_to_cpu(ioasa_data[i+2]),
6124 be32_to_cpu(ioasa_data[i+3]));
6125 }
6126}
6127
6128/**
6129 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6130 * @ipr_cmd: ipr command struct
6132 *
6133 * Return value:
6134 * none
6135 **/
6136static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6137{
6138 u32 failing_lba;
6139 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6140 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006141 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6142 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006143
6144 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6145
6146 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6147 return;
6148
6149 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6150
6151 if (ipr_is_vset_device(res) &&
6152 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6153 ioasa->u.vset.failing_lba_hi != 0) {
6154 sense_buf[0] = 0x72;
6155 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6156 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6157 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6158
6159 sense_buf[7] = 12;
6160 sense_buf[8] = 0;
6161 sense_buf[9] = 0x0A;
6162 sense_buf[10] = 0x80;
6163
6164 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6165
6166 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6167 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6168 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6169 sense_buf[15] = failing_lba & 0x000000ff;
6170
6171 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6172
6173 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6174 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6175 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6176 sense_buf[19] = failing_lba & 0x000000ff;
6177 } else {
6178 sense_buf[0] = 0x70;
6179 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6180 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6181 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6182
6183 /* Illegal request */
6184 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07006185 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006186 sense_buf[7] = 10; /* additional length */
6187
6188 /* IOARCB was in error */
6189 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6190 sense_buf[15] = 0xC0;
6191 else /* Parameter data was invalid */
6192 sense_buf[15] = 0x80;
6193
6194 sense_buf[16] =
6195 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006196 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006197 sense_buf[17] =
6198 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006199 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006200 } else {
6201 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6202 if (ipr_is_vset_device(res))
6203 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6204 else
6205 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6206
6207 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6208 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6209 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6210 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6211 sense_buf[6] = failing_lba & 0x000000ff;
6212 }
6213
6214 sense_buf[7] = 6; /* additional length */
6215 }
6216 }
6217}
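/*
 * In outline: for volume-set devices whose failing LBA no longer fits in
 * 32 bits, the sense data is built in descriptor format (response code
 * 0x72) with an information descriptor holding the 64-bit LBA; everything
 * else gets fixed format (response code 0x70), where the failing LBA, if
 * known, goes in the information bytes and illegal-request errors also
 * fill in the field pointer to indicate whether the CDB or the parameter
 * data was at fault.
 */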
6218
6219/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006220 * ipr_get_autosense - Copy autosense data to sense buffer
6221 * @ipr_cmd: ipr command struct
6222 *
6223 * This function copies the autosense buffer to the buffer
6224 * in the scsi_cmd, if there is autosense available.
6225 *
6226 * Return value:
6227 * 1 if autosense was available / 0 if not
6228 **/
6229static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6230{
Wayne Boyer96d21f02010-05-10 09:13:27 -07006231 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6232 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006233
Wayne Boyer96d21f02010-05-10 09:13:27 -07006234 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006235 return 0;
6236
Wayne Boyer96d21f02010-05-10 09:13:27 -07006237 if (ipr_cmd->ioa_cfg->sis64)
6238 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6239 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6240 SCSI_SENSE_BUFFERSIZE));
6241 else
6242 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6243 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6244 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006245 return 1;
6246}
6247
6248/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006249 * ipr_erp_start - Process an error response for a SCSI op
6250 * @ioa_cfg: ioa config struct
6251 * @ipr_cmd: ipr command struct
6252 *
6253 * This function determines whether or not to initiate ERP
6254 * on the affected device.
6255 *
6256 * Return value:
6257 * nothing
6258 **/
6259static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6260 struct ipr_cmnd *ipr_cmd)
6261{
6262 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6263 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006264 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05006265 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006266
6267 if (!res) {
6268 ipr_scsi_eh_done(ipr_cmd);
6269 return;
6270 }
6271
Brian King8a048992007-04-26 16:00:10 -05006272 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006273 ipr_gen_sense(ipr_cmd);
6274
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006275 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6276
Brian King8a048992007-04-26 16:00:10 -05006277 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006278 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006279 if (ipr_is_naca_model(res))
6280 scsi_cmd->result |= (DID_ABORT << 16);
6281 else
6282 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006283 break;
6284 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006285 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006286 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6287 break;
6288 case IPR_IOASC_HW_SEL_TIMEOUT:
6289 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006290 if (!ipr_is_naca_model(res))
6291 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006292 break;
6293 case IPR_IOASC_SYNC_REQUIRED:
6294 if (!res->in_erp)
6295 res->needs_sync_complete = 1;
6296 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6297 break;
6298 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006299 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006300 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6301 break;
6302 case IPR_IOASC_BUS_WAS_RESET:
6303 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6304 /*
6305 * Report the bus reset and ask for a retry. The device
6306 * will give CC/UA the next command.
6307 */
6308 if (!res->resetting_device)
6309 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6310 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006311 if (!ipr_is_naca_model(res))
6312 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006313 break;
6314 case IPR_IOASC_HW_DEV_BUS_STATUS:
6315 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6316 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006317 if (!ipr_get_autosense(ipr_cmd)) {
6318 if (!ipr_is_naca_model(res)) {
6319 ipr_erp_cancel_all(ipr_cmd);
6320 return;
6321 }
6322 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006323 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006324 if (!ipr_is_naca_model(res))
6325 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006326 break;
6327 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6328 break;
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006329 case IPR_IOASC_IR_NON_OPTIMIZED:
6330 if (res->raw_mode) {
6331 res->raw_mode = 0;
6332 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6333 } else
6334 scsi_cmd->result |= (DID_ERROR << 16);
6335 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006336 default:
Brian King5b7304f2006-08-02 14:57:51 -05006337 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6338 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006339 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006340 res->needs_sync_complete = 1;
6341 break;
6342 }
6343
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006344 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006345 scsi_cmd->scsi_done(scsi_cmd);
Brian King2f1b0a82017-03-15 16:58:36 -05006346 if (ipr_cmd->eh_comp)
6347 complete(ipr_cmd->eh_comp);
6348 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006349}
6350
6351/**
6352 * ipr_scsi_done - mid-layer done function
6353 * @ipr_cmd: ipr command struct
6354 *
6355 * This function is invoked by the interrupt handler for
6356 * ops generated by the SCSI mid-layer
6357 *
6358 * Return value:
6359 * none
6360 **/
6361static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6362{
6363 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6364 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006365 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King36b8e182015-07-14 11:41:29 -05006366 unsigned long lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006367
Wayne Boyer96d21f02010-05-10 09:13:27 -07006368 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006369
6370 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05006371 scsi_dma_unmap(scsi_cmd);
6372
Brian King36b8e182015-07-14 11:41:29 -05006373 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006374 scsi_cmd->scsi_done(scsi_cmd);
Brian King2f1b0a82017-03-15 16:58:36 -05006375 if (ipr_cmd->eh_comp)
6376 complete(ipr_cmd->eh_comp);
6377 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Brian King36b8e182015-07-14 11:41:29 -05006378 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006379 } else {
Brian King36b8e182015-07-14 11:41:29 -05006380 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6381 spin_lock(&ipr_cmd->hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382 ipr_erp_start(ioa_cfg, ipr_cmd);
Brian King36b8e182015-07-14 11:41:29 -05006383 spin_unlock(&ipr_cmd->hrrq->_lock);
6384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006385 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006386}
6387
6388/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006389 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006390 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006391 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006392 *
6393 * This function queues a request generated by the mid-layer.
6394 *
6395 * Return value:
6396 * 0 on success
6397 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6398 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6399 **/
Brian King00bfef22012-07-17 08:13:52 -05006400static int ipr_queuecommand(struct Scsi_Host *shost,
6401 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006402{
6403 struct ipr_ioa_cfg *ioa_cfg;
6404 struct ipr_resource_entry *res;
6405 struct ipr_ioarcb *ioarcb;
6406 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006407 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006408 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006409 struct ipr_hrr_queue *hrrq;
6410 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006411
Brian King00bfef22012-07-17 08:13:52 -05006412 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6413
Linus Torvalds1da177e2005-04-16 15:20:36 -07006414 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006415 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006416
6417 if (ipr_is_gata(res) && res->sata_port) {
6418 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6419 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6420 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6421 return rc;
6422 }
6423
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006424 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6425 hrrq = &ioa_cfg->hrrq[hrrq_id];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006426
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006427 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006428 /*
6429	 * We are currently blocking all devices due to a host reset.
6430 * We have told the host to stop giving us new requests, but
6431 * ERP ops don't count. FIXME
6432 */
Brian Kingbfae7822013-01-30 23:45:08 -06006433 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006434 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006435 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006436 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006437
6438 /*
6439 * FIXME - Create scsi_set_host_offline interface
6440 * and the ioa_is_dead check can be removed
6441 */
Brian Kingbfae7822013-01-30 23:45:08 -06006442 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006443 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006444 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006445 }
6446
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006447 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6448 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006449 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006450 return SCSI_MLQUEUE_HOST_BUSY;
6451 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006452 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006453
Brian King172cd6e2012-07-17 08:14:40 -05006454 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006455 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006456
6457 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6458 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006459 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006460
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006461 if (ipr_is_gscsi(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006462 if (scsi_cmd->underflow == 0)
6463 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6464
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006465 if (res->reset_occurred) {
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006466 res->reset_occurred = 0;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006467 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006468 }
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006469 }
6470
6471 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6472 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6473
Linus Torvalds1da177e2005-04-16 15:20:36 -07006474 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
Christoph Hellwig50668632014-10-30 14:30:06 +01006475 if (scsi_cmd->flags & SCMD_TAGGED)
6476 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6477 else
6478 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006479 }
6480
6481 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006482 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006483 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006484 }
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006485 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006486 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006487
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006488 if (scsi_cmd->underflow == 0)
6489 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6490 }
6491
Dan Carpenterd12f1572012-07-30 11:18:22 +03006492 if (ioa_cfg->sis64)
6493 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6494 else
6495 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006496
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006497 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6498 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006499 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006500 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006501 if (!rc)
6502 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006503 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006504 }
6505
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006506 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006507 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006508 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006509 scsi_dma_unmap(scsi_cmd);
6510 goto err_nodev;
6511 }
6512
6513 ioarcb->res_handle = res->res_handle;
6514 if (res->needs_sync_complete) {
6515 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6516 res->needs_sync_complete = 0;
6517 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006518 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006519 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006520 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006521 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006522 return 0;
6523
6524err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006525 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006526 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6527 scsi_cmd->result = (DID_NO_CONNECT << 16);
6528 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006529 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006530 return 0;
6531}
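/*
 * A rough sketch of the locking in the fast path above: ordinary SCSI
 * commands take only the per-HRRQ lock of the queue chosen by
 * ipr_get_hrrq_index(), so multiple queues can be driven in parallel,
 * while SATA commands and the error paths fall back to the adapter-wide
 * host_lock.  The allow_cmds/ioa_is_dead checks are repeated after the
 * IOADL is built because the HRRQ lock is dropped around the DMA mapping
 * and the adapter state may have changed in the meantime.
 */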
6532
6533/**
Brian King35a39692006-09-25 12:39:20 -05006534 * ipr_ioctl - IOCTL handler
6535 * @sdev: scsi device struct
6536 * @cmd: IOCTL cmd
6537 * @arg: IOCTL arg
6538 *
6539 * Return value:
6540 * 0 on success / other on failure
6541 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06006542static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006543{
6544 struct ipr_resource_entry *res;
6545
6546 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006547 if (res && ipr_is_gata(res)) {
6548 if (cmd == HDIO_GET_IDENTITY)
6549 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006550 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006551 }
Brian King35a39692006-09-25 12:39:20 -05006552
6553 return -EINVAL;
6554}
6555
6556/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006557 * ipr_ioa_info - Get information about the card/driver
6558 * @host: scsi host struct
6559 *
6560 * Return value:
6561 * pointer to buffer with description string
6562 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006563static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006564{
6565 static char buffer[512];
6566 struct ipr_ioa_cfg *ioa_cfg;
6567 unsigned long lock_flags = 0;
6568
6569 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6570
6571 spin_lock_irqsave(host->host_lock, lock_flags);
6572 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6573 spin_unlock_irqrestore(host->host_lock, lock_flags);
6574
6575 return buffer;
6576}
6577
6578static struct scsi_host_template driver_template = {
6579 .module = THIS_MODULE,
6580 .name = "IPR",
6581 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006582 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006583 .queuecommand = ipr_queuecommand,
6584 .eh_abort_handler = ipr_eh_abort,
6585 .eh_device_reset_handler = ipr_eh_dev_reset,
6586 .eh_host_reset_handler = ipr_eh_host_reset,
6587 .slave_alloc = ipr_slave_alloc,
6588 .slave_configure = ipr_slave_configure,
6589 .slave_destroy = ipr_slave_destroy,
Brian Kingf688f962014-12-02 12:47:37 -06006590 .scan_finished = ipr_scan_finished,
Brian King35a39692006-09-25 12:39:20 -05006591 .target_alloc = ipr_target_alloc,
6592 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593 .change_queue_depth = ipr_change_queue_depth,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006594 .bios_param = ipr_biosparam,
6595 .can_queue = IPR_MAX_COMMANDS,
6596 .this_id = -1,
6597 .sg_tablesize = IPR_MAX_SGLIST,
6598 .max_sectors = IPR_IOA_MAX_SECTORS,
6599 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6600 .use_clustering = ENABLE_CLUSTERING,
6601 .shost_attrs = ipr_ioa_attrs,
6602 .sdev_attrs = ipr_dev_attrs,
Martin K. Petersen54b2b502013-10-23 06:25:40 -04006603 .proc_name = IPR_NAME,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006604};
6605
Brian King35a39692006-09-25 12:39:20 -05006606/**
6607 * ipr_ata_phy_reset - libata phy_reset handler
6608 * @ap: ata port to reset
6609 *
6610 **/
6611static void ipr_ata_phy_reset(struct ata_port *ap)
6612{
6613 unsigned long flags;
6614 struct ipr_sata_port *sata_port = ap->private_data;
6615 struct ipr_resource_entry *res = sata_port->res;
6616 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6617 int rc;
6618
6619 ENTER;
6620 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006621 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006622 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6623 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6624 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6625 }
6626
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006627 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006628 goto out_unlock;
6629
6630 rc = ipr_device_reset(ioa_cfg, res);
6631
6632 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006633 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006634 goto out_unlock;
6635 }
6636
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006637 ap->link.device[0].class = res->ata_class;
6638 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006639 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006640
6641out_unlock:
6642 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6643 LEAVE;
6644}
6645
6646/**
6647 * ipr_ata_post_internal - Cleanup after an internal command
6648 * @qc: ATA queued command
6649 *
6650 * Return value:
6651 * none
6652 **/
6653static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6654{
6655 struct ipr_sata_port *sata_port = qc->ap->private_data;
6656 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6657 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006658 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006659 unsigned long flags;
6660
6661 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006662 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6664 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6665 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6666 }
6667
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006668 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006669 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006670 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6671 if (ipr_cmd->qc == qc) {
6672 ipr_device_reset(ioa_cfg, sata_port->res);
6673 break;
6674 }
Brian King35a39692006-09-25 12:39:20 -05006675 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006676 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006677 }
6678 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6679}
6680
6681/**
Brian King35a39692006-09-25 12:39:20 -05006682 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6683 * @regs: destination
6684 * @tf: source ATA taskfile
6685 *
6686 * Return value:
6687 * none
6688 **/
6689static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6690 struct ata_taskfile *tf)
6691{
6692 regs->feature = tf->feature;
6693 regs->nsect = tf->nsect;
6694 regs->lbal = tf->lbal;
6695 regs->lbam = tf->lbam;
6696 regs->lbah = tf->lbah;
6697 regs->device = tf->device;
6698 regs->command = tf->command;
6699 regs->hob_feature = tf->hob_feature;
6700 regs->hob_nsect = tf->hob_nsect;
6701 regs->hob_lbal = tf->hob_lbal;
6702 regs->hob_lbam = tf->hob_lbam;
6703 regs->hob_lbah = tf->hob_lbah;
6704 regs->ctl = tf->ctl;
6705}
6706
6707/**
6708 * ipr_sata_done - done function for SATA commands
6709 * @ipr_cmd: ipr command struct
6710 *
6711 * This function is invoked by the interrupt handler for
6712 * ops generated by the SCSI mid-layer to SATA devices
6713 *
6714 * Return value:
6715 * none
6716 **/
6717static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6718{
6719 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6720 struct ata_queued_cmd *qc = ipr_cmd->qc;
6721 struct ipr_sata_port *sata_port = qc->ap->private_data;
6722 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006723 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006724
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006725 spin_lock(&ipr_cmd->hrrq->_lock);
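	/*
	 * Save the ATA register image from the IOASA; ipr_qc_fill_rtf
	 * later copies it into the result taskfile for libata.
	 */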
Wayne Boyer96d21f02010-05-10 09:13:27 -07006726 if (ipr_cmd->ioa_cfg->sis64)
6727 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6728 sizeof(struct ipr_ioasa_gata));
6729 else
6730 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6731 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006732 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6733
Wayne Boyer96d21f02010-05-10 09:13:27 -07006734 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006735 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006736
6737 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006738 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006739 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006740 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006741 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006742 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006743 ata_qc_complete(qc);
6744}
6745
6746/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006747 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6748 * @ipr_cmd: ipr command struct
6749 * @qc: ATA queued command
6750 *
6751 **/
6752static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6753 struct ata_queued_cmd *qc)
6754{
6755 u32 ioadl_flags = 0;
6756 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006757 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
Wayne Boyera32c0552010-02-19 13:23:36 -08006758 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6759 int len = qc->nbytes;
6760 struct scatterlist *sg;
6761 unsigned int si;
6762 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6763
6764 if (len == 0)
6765 return;
6766
6767 if (qc->dma_dir == DMA_TO_DEVICE) {
6768 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6769 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6770 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6771 ioadl_flags = IPR_IOADL_FLAGS_READ;
6772
6773 ioarcb->data_transfer_length = cpu_to_be32(len);
6774 ioarcb->ioadl_len =
6775 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6776 ioarcb->u.sis64_addr_data.data_ioadl_addr =
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006777 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
Wayne Boyera32c0552010-02-19 13:23:36 -08006778
6779 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6780 ioadl64->flags = cpu_to_be32(ioadl_flags);
6781 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6782 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6783
6784 last_ioadl64 = ioadl64;
6785 ioadl64++;
6786 }
6787
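	/* Mark the final descriptor so the adapter knows where the list ends. */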
6788 if (likely(last_ioadl64))
6789 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6790}
6791
6792/**
Brian King35a39692006-09-25 12:39:20 -05006793 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6794 * @ipr_cmd: ipr command struct
6795 * @qc: ATA queued command
6796 *
6797 **/
6798static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6799 struct ata_queued_cmd *qc)
6800{
6801 u32 ioadl_flags = 0;
6802 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006803 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006804 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006805 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006806 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006807 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006808
6809 if (len == 0)
6810 return;
6811
6812 if (qc->dma_dir == DMA_TO_DEVICE) {
6813 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6814 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006815 ioarcb->data_transfer_length = cpu_to_be32(len);
6816 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006817 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6818 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6819 ioadl_flags = IPR_IOADL_FLAGS_READ;
6820 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6821 ioarcb->read_ioadl_len =
6822 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6823 }
6824
Tejun Heoff2aeb12007-12-05 16:43:11 +09006825 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006826 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6827 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006828
6829 last_ioadl = ioadl;
6830 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006831 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006832
6833 if (likely(last_ioadl))
6834 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006835}
6836
6837/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006838 * ipr_qc_defer - Get a free ipr_cmd
6839 * @qc: queued command
6840 *
6841 * Return value:
 6842 * 	0 if success / ATA_DEFER_LINK if no command block is available
6843 **/
6844static int ipr_qc_defer(struct ata_queued_cmd *qc)
6845{
6846 struct ata_port *ap = qc->ap;
6847 struct ipr_sata_port *sata_port = ap->private_data;
6848 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6849 struct ipr_cmnd *ipr_cmd;
6850 struct ipr_hrr_queue *hrrq;
6851 int hrrq_id;
6852
6853 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6854 hrrq = &ioa_cfg->hrrq[hrrq_id];
6855
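	/*
	 * Reserve a command block from the selected HRRQ now and stash it
	 * in qc->lldd_task for ipr_qc_issue to use later.
	 */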
6856 qc->lldd_task = NULL;
6857 spin_lock(&hrrq->_lock);
6858 if (unlikely(hrrq->ioa_is_dead)) {
6859 spin_unlock(&hrrq->_lock);
6860 return 0;
6861 }
6862
6863 if (unlikely(!hrrq->allow_cmds)) {
6864 spin_unlock(&hrrq->_lock);
6865 return ATA_DEFER_LINK;
6866 }
6867
6868 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6869 if (ipr_cmd == NULL) {
6870 spin_unlock(&hrrq->_lock);
6871 return ATA_DEFER_LINK;
6872 }
6873
6874 qc->lldd_task = ipr_cmd;
6875 spin_unlock(&hrrq->_lock);
6876 return 0;
6877}
6878
6879/**
Brian King35a39692006-09-25 12:39:20 -05006880 * ipr_qc_issue - Issue a SATA qc to a device
6881 * @qc: queued command
6882 *
6883 * Return value:
 6884 * 	0 if success / AC_ERR_* if the command cannot be issued
6885 **/
6886static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6887{
6888 struct ata_port *ap = qc->ap;
6889 struct ipr_sata_port *sata_port = ap->private_data;
6890 struct ipr_resource_entry *res = sata_port->res;
6891 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6892 struct ipr_cmnd *ipr_cmd;
6893 struct ipr_ioarcb *ioarcb;
6894 struct ipr_ioarcb_ata_regs *regs;
6895
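	/* qc_defer normally reserves the command block; retry here if it has not run for this qc. */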
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006896 if (qc->lldd_task == NULL)
6897 ipr_qc_defer(qc);
6898
6899 ipr_cmd = qc->lldd_task;
6900 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05006901 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05006902
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006903 qc->lldd_task = NULL;
6904 spin_lock(&ipr_cmd->hrrq->_lock);
6905 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6906 ipr_cmd->hrrq->ioa_is_dead)) {
6907 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6908 spin_unlock(&ipr_cmd->hrrq->_lock);
6909 return AC_ERR_SYSTEM;
6910 }
6911
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006912 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05006913 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05006914
Wayne Boyera32c0552010-02-19 13:23:36 -08006915 if (ioa_cfg->sis64) {
6916 regs = &ipr_cmd->i.ata_ioadl.regs;
6917 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6918 } else
6919 regs = &ioarcb->u.add_data.u.regs;
6920
6921 memset(regs, 0, sizeof(*regs));
6922 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05006923
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006924 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05006925 ipr_cmd->qc = qc;
6926 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006927 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05006928 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6929 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6930 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01006931 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05006932
Wayne Boyera32c0552010-02-19 13:23:36 -08006933 if (ioa_cfg->sis64)
6934 ipr_build_ata_ioadl64(ipr_cmd, qc);
6935 else
6936 ipr_build_ata_ioadl(ipr_cmd, qc);
6937
Brian King35a39692006-09-25 12:39:20 -05006938 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6939 ipr_copy_sata_tf(regs, &qc->tf);
6940 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006941 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05006942
6943 switch (qc->tf.protocol) {
6944 case ATA_PROT_NODATA:
6945 case ATA_PROT_PIO:
6946 break;
6947
6948 case ATA_PROT_DMA:
6949 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6950 break;
6951
Tejun Heo0dc36882007-12-18 16:34:43 -05006952 case ATAPI_PROT_PIO:
6953 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05006954 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6955 break;
6956
Tejun Heo0dc36882007-12-18 16:34:43 -05006957 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05006958 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6959 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6960 break;
6961
6962 default:
6963 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006964 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05006965 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05006966 }
6967
Wayne Boyera32c0552010-02-19 13:23:36 -08006968 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006969 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08006970
Brian King35a39692006-09-25 12:39:20 -05006971 return 0;
6972}
6973
6974/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006975 * ipr_qc_fill_rtf - Read result TF
6976 * @qc: ATA queued command
6977 *
6978 * Return value:
6979 * true
6980 **/
6981static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6982{
6983 struct ipr_sata_port *sata_port = qc->ap->private_data;
6984 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6985 struct ata_taskfile *tf = &qc->result_tf;
6986
6987 tf->feature = g->error;
6988 tf->nsect = g->nsect;
6989 tf->lbal = g->lbal;
6990 tf->lbam = g->lbam;
6991 tf->lbah = g->lbah;
6992 tf->device = g->device;
6993 tf->command = g->status;
6994 tf->hob_nsect = g->hob_nsect;
6995 tf->hob_lbal = g->hob_lbal;
6996 tf->hob_lbam = g->hob_lbam;
6997 tf->hob_lbah = g->hob_lbah;
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006998
6999 return true;
7000}
7001
Brian King35a39692006-09-25 12:39:20 -05007002static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05007003 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09007004 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05007005 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05007006 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007007 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05007008 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007009 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05007010 .port_start = ata_sas_port_start,
7011 .port_stop = ata_sas_port_stop
7012};
7013
7014static struct ata_port_info sata_port_info = {
Shaohua Li5067c042015-03-12 10:32:18 -07007015 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7016 ATA_FLAG_SAS_HOST,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03007017 .pio_mask = ATA_PIO4_ONLY,
7018 .mwdma_mask = ATA_MWDMA2,
7019 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05007020 .port_ops = &ipr_sata_ops
7021};
7022
Linus Torvalds1da177e2005-04-16 15:20:36 -07007023#ifdef CONFIG_PPC_PSERIES
7024static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007025 PVR_NORTHSTAR,
7026 PVR_PULSAR,
7027 PVR_POWER4,
7028 PVR_ICESTAR,
7029 PVR_SSTAR,
7030 PVR_POWER4p,
7031 PVR_630,
7032 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07007033};
7034
7035/**
7036 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7037 * @ioa_cfg: ioa cfg struct
7038 *
7039 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7040 * certain pSeries hardware. This function determines if the given
 7041 * adapter is in one of these configurations or not.
7042 *
7043 * Return value:
7044 * 1 if adapter is not supported / 0 if adapter is supported
7045 **/
7046static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7047{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007048 int i;
7049
Auke Kok44c10132007-06-08 15:46:36 -07007050 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007051 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007052 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07007053 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007054 }
7055 }
7056 return 0;
7057}
7058#else
7059#define ipr_invalid_adapter(ioa_cfg) 0
7060#endif
7061
7062/**
7063 * ipr_ioa_bringdown_done - IOA bring down completion.
7064 * @ipr_cmd: ipr command struct
7065 *
7066 * This function processes the completion of an adapter bring down.
7067 * It wakes any reset sleepers.
7068 *
7069 * Return value:
7070 * IPR_RC_JOB_RETURN
7071 **/
7072static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7073{
7074 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007075 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007076
7077 ENTER;
Brian Kingbfae7822013-01-30 23:45:08 -06007078 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7079 ipr_trace;
7080 spin_unlock_irq(ioa_cfg->host->host_lock);
7081 scsi_unblock_requests(ioa_cfg->host);
7082 spin_lock_irq(ioa_cfg->host->host_lock);
7083 }
7084
Linus Torvalds1da177e2005-04-16 15:20:36 -07007085 ioa_cfg->in_reset_reload = 0;
7086 ioa_cfg->reset_retries = 0;
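	/* Mark every HRRQ dead so no new commands are accepted on this adapter. */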
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007087 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7088 spin_lock(&ioa_cfg->hrrq[i]._lock);
7089 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7090 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7091 }
7092 wmb();
7093
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007094 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007095 wake_up_all(&ioa_cfg->reset_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007096 LEAVE;
7097
7098 return IPR_RC_JOB_RETURN;
7099}
7100
7101/**
7102 * ipr_ioa_reset_done - IOA reset completion.
7103 * @ipr_cmd: ipr command struct
7104 *
7105 * This function processes the completion of an adapter reset.
7106 * It schedules any necessary mid-layer add/removes and
7107 * wakes any reset sleepers.
7108 *
7109 * Return value:
7110 * IPR_RC_JOB_RETURN
7111 **/
7112static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7113{
7114 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7115 struct ipr_resource_entry *res;
Brian Kingafc3f832016-08-24 12:56:51 -05007116 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007117
7118 ENTER;
7119 ioa_cfg->in_reset_reload = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007120 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7121 spin_lock(&ioa_cfg->hrrq[j]._lock);
7122 ioa_cfg->hrrq[j].allow_cmds = 1;
7123 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7124 }
7125 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007126 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06007127 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007128
7129 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Brian Kingf688f962014-12-02 12:47:37 -06007130 if (res->add_to_ml || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007131 ipr_trace;
7132 break;
7133 }
7134 }
7135 schedule_work(&ioa_cfg->work_q);
7136
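	/*
	 * Re-post the HCAM buffers so error log and configuration change
	 * notifications resume after the reset.
	 */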
Brian Kingafc3f832016-08-24 12:56:51 -05007137 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7138 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7139 if (j < IPR_NUM_LOG_HCAMS)
7140 ipr_send_hcam(ioa_cfg,
7141 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7142 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007143 else
Brian Kingafc3f832016-08-24 12:56:51 -05007144 ipr_send_hcam(ioa_cfg,
7145 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7146 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007147 }
7148
Brian King6bb04172007-04-26 16:00:08 -05007149 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007150 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7151
7152 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007153 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007154 wake_up_all(&ioa_cfg->reset_wait_q);
7155
Mark Nelson30237852008-12-10 12:23:20 +11007156 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007157 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11007158 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007159
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007160 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007161 scsi_block_requests(ioa_cfg->host);
7162
Brian Kingf688f962014-12-02 12:47:37 -06007163 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007164 LEAVE;
7165 return IPR_RC_JOB_RETURN;
7166}
7167
7168/**
7169 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7170 * @supported_dev: supported device struct
7171 * @vpids: vendor product id struct
7172 *
7173 * Return value:
7174 * none
7175 **/
7176static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7177 struct ipr_std_inq_vpids *vpids)
7178{
7179 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7180 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7181 supported_dev->num_records = 1;
7182 supported_dev->data_length =
7183 cpu_to_be16(sizeof(struct ipr_supported_device));
7184 supported_dev->reserved = 0;
7185}
7186
7187/**
7188 * ipr_set_supported_devs - Send Set Supported Devices for a device
7189 * @ipr_cmd: ipr command struct
7190 *
Wayne Boyera32c0552010-02-19 13:23:36 -08007191 * This function sends a Set Supported Devices command to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07007192 *
7193 * Return value:
7194 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7195 **/
7196static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7197{
7198 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7199 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007200 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7201 struct ipr_resource_entry *res = ipr_cmd->u.res;
7202
7203 ipr_cmd->job_step = ipr_ioa_reset_done;
7204
7205 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06007206 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007207 continue;
7208
7209 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007210 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007211
7212 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7213 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7214 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7215
7216 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007217 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007218 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7219 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7220
Wayne Boyera32c0552010-02-19 13:23:36 -08007221 ipr_init_ioadl(ipr_cmd,
7222 ioa_cfg->vpd_cbs_dma +
7223 offsetof(struct ipr_misc_cbs, supp_dev),
7224 sizeof(struct ipr_supported_device),
7225 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007226
7227 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7228 IPR_SET_SUP_DEVICE_TIMEOUT);
7229
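		/*
		 * On non-SIS64 adapters this step re-arms itself so a Set Supported
		 * Devices is sent for each remaining disk; list_for_each_entry_continue
		 * resumes from the last resource processed.
		 */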
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007230 if (!ioa_cfg->sis64)
7231 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007232 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007233 return IPR_RC_JOB_RETURN;
7234 }
7235
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007236 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007237 return IPR_RC_JOB_CONTINUE;
7238}
7239
7240/**
7241 * ipr_get_mode_page - Locate specified mode page
7242 * @mode_pages: mode page buffer
7243 * @page_code: page code to find
7244 * @len: minimum required length for mode page
7245 *
7246 * Return value:
7247 * pointer to mode page / NULL on failure
7248 **/
7249static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7250 u32 page_code, u32 len)
7251{
7252 struct ipr_mode_page_hdr *mode_hdr;
7253 u32 page_length;
7254 u32 length;
7255
7256 if (!mode_pages || (mode_pages->hdr.length == 0))
7257 return NULL;
7258
7259 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7260 mode_hdr = (struct ipr_mode_page_hdr *)
7261 (mode_pages->data + mode_pages->hdr.block_desc_len);
7262
7263 while (length) {
7264 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7265 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7266 return mode_hdr;
7267 break;
7268 } else {
7269 page_length = (sizeof(struct ipr_mode_page_hdr) +
7270 mode_hdr->page_length);
7271 length -= page_length;
7272 mode_hdr = (struct ipr_mode_page_hdr *)
7273 ((unsigned long)mode_hdr + page_length);
7274 }
7275 }
7276 return NULL;
7277}
7278
7279/**
7280 * ipr_check_term_power - Check for term power errors
7281 * @ioa_cfg: ioa config struct
7282 * @mode_pages: IOAFP mode pages buffer
7283 *
7284 * Check the IOAFP's mode page 28 for term power errors
7285 *
7286 * Return value:
7287 * nothing
7288 **/
7289static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7290 struct ipr_mode_pages *mode_pages)
7291{
7292 int i;
7293 int entry_length;
7294 struct ipr_dev_bus_entry *bus;
7295 struct ipr_mode_page28 *mode_page;
7296
7297 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7298 sizeof(struct ipr_mode_page28));
7299
7300 entry_length = mode_page->entry_length;
7301
7302 bus = mode_page->bus;
7303
7304 for (i = 0; i < mode_page->num_entries; i++) {
7305 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7306 dev_err(&ioa_cfg->pdev->dev,
7307 "Term power is absent on scsi bus %d\n",
7308 bus->res_addr.bus);
7309 }
7310
7311 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7312 }
7313}
7314
7315/**
7316 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7317 * @ioa_cfg: ioa config struct
7318 *
7319 * Looks through the config table checking for SES devices. If
7320 * the SES device is in the SES table indicating a maximum SCSI
7321 * bus speed, the speed is limited for the bus.
7322 *
7323 * Return value:
7324 * none
7325 **/
7326static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7327{
7328 u32 max_xfer_rate;
7329 int i;
7330
7331 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7332 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7333 ioa_cfg->bus_attr[i].bus_width);
7334
7335 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7336 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7337 }
7338}
7339
7340/**
7341 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7342 * @ioa_cfg: ioa config struct
7343 * @mode_pages: mode page 28 buffer
7344 *
7345 * Updates mode page 28 based on driver configuration
7346 *
7347 * Return value:
7348 * none
7349 **/
7350static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007351 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007352{
7353 int i, entry_length;
7354 struct ipr_dev_bus_entry *bus;
7355 struct ipr_bus_attributes *bus_attr;
7356 struct ipr_mode_page28 *mode_page;
7357
7358 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7359 sizeof(struct ipr_mode_page28));
7360
7361 entry_length = mode_page->entry_length;
7362
7363 /* Loop for each device bus entry */
7364 for (i = 0, bus = mode_page->bus;
7365 i < mode_page->num_entries;
7366 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7367 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7368 dev_err(&ioa_cfg->pdev->dev,
7369 "Invalid resource address reported: 0x%08X\n",
7370 IPR_GET_PHYS_LOC(bus->res_addr));
7371 continue;
7372 }
7373
7374 bus_attr = &ioa_cfg->bus_attr[i];
7375 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7376 bus->bus_width = bus_attr->bus_width;
7377 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7378 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7379 if (bus_attr->qas_enabled)
7380 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7381 else
7382 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7383 }
7384}
7385
7386/**
7387 * ipr_build_mode_select - Build a mode select command
7388 * @ipr_cmd: ipr command struct
7389 * @res_handle: resource handle to send command to
7390 * @parm: Byte 2 of Mode Sense command
7391 * @dma_addr: DMA buffer address
7392 * @xfer_len: data transfer length
7393 *
7394 * Return value:
7395 * none
7396 **/
7397static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08007398 __be32 res_handle, u8 parm,
7399 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007400{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007401 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7402
7403 ioarcb->res_handle = res_handle;
7404 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7405 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7406 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7407 ioarcb->cmd_pkt.cdb[1] = parm;
7408 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7409
Wayne Boyera32c0552010-02-19 13:23:36 -08007410 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007411}
7412
7413/**
7414 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7415 * @ipr_cmd: ipr command struct
7416 *
7417 * This function sets up the SCSI bus attributes and sends
7418 * a Mode Select for Page 28 to activate them.
7419 *
7420 * Return value:
7421 * IPR_RC_JOB_RETURN
7422 **/
7423static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7424{
7425 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7426 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7427 int length;
7428
7429 ENTER;
Brian King47338042006-02-08 20:57:42 -06007430 ipr_scsi_bus_speed_limit(ioa_cfg);
7431 ipr_check_term_power(ioa_cfg, mode_pages);
7432 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
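	/*
	 * The returned mode data length excludes the length field itself, so
	 * transfer length + 1 bytes; the field is cleared before the buffer is
	 * sent back with MODE SELECT.
	 */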
7433 length = mode_pages->hdr.length + 1;
7434 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007435
7436 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7437 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7438 length);
7439
Wayne Boyerf72919e2010-02-19 13:24:21 -08007440 ipr_cmd->job_step = ipr_set_supported_devs;
7441 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7442 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007443 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7444
7445 LEAVE;
7446 return IPR_RC_JOB_RETURN;
7447}
7448
7449/**
7450 * ipr_build_mode_sense - Builds a mode sense command
7451 * @ipr_cmd: ipr command struct
 7452 * @res_handle:	resource handle to send command to
7453 * @parm: Byte 2 of mode sense command
7454 * @dma_addr: DMA address of mode sense buffer
7455 * @xfer_len: Size of DMA buffer
7456 *
7457 * Return value:
7458 * none
7459 **/
7460static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7461 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007462 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007463{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007464 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7465
7466 ioarcb->res_handle = res_handle;
7467 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7468 ioarcb->cmd_pkt.cdb[2] = parm;
7469 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7470 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7471
Wayne Boyera32c0552010-02-19 13:23:36 -08007472 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007473}
7474
7475/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007476 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7477 * @ipr_cmd: ipr command struct
7478 *
7479 * This function handles the failure of an IOA bringup command.
7480 *
7481 * Return value:
7482 * IPR_RC_JOB_RETURN
7483 **/
7484static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7485{
7486 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007487 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007488
7489 dev_err(&ioa_cfg->pdev->dev,
7490 "0x%02X failed with IOASC: 0x%08X\n",
7491 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7492
7493 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007494 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007495 return IPR_RC_JOB_RETURN;
7496}
7497
7498/**
7499 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7500 * @ipr_cmd: ipr command struct
7501 *
7502 * This function handles the failure of a Mode Sense to the IOAFP.
7503 * Some adapters do not handle all mode pages.
7504 *
7505 * Return value:
7506 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7507 **/
7508static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7509{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007510 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007511 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007512
7513 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007514 ipr_cmd->job_step = ipr_set_supported_devs;
7515 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7516 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007517 return IPR_RC_JOB_CONTINUE;
7518 }
7519
7520 return ipr_reset_cmd_failed(ipr_cmd);
7521}
7522
7523/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007524 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7525 * @ipr_cmd: ipr command struct
7526 *
 7527 * This function sends a Page 28 mode sense to the IOA to
7528 * retrieve SCSI bus attributes.
7529 *
7530 * Return value:
7531 * IPR_RC_JOB_RETURN
7532 **/
7533static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7534{
7535 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7536
7537 ENTER;
7538 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7539 0x28, ioa_cfg->vpd_cbs_dma +
7540 offsetof(struct ipr_misc_cbs, mode_pages),
7541 sizeof(struct ipr_mode_pages));
7542
7543 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007544 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545
7546 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7547
7548 LEAVE;
7549 return IPR_RC_JOB_RETURN;
7550}
7551
7552/**
Brian Kingac09c342007-04-26 16:00:16 -05007553 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7554 * @ipr_cmd: ipr command struct
7555 *
7556 * This function enables dual IOA RAID support if possible.
7557 *
7558 * Return value:
7559 * IPR_RC_JOB_RETURN
7560 **/
7561static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7562{
7563 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7564 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7565 struct ipr_mode_page24 *mode_page;
7566 int length;
7567
7568 ENTER;
7569 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7570 sizeof(struct ipr_mode_page24));
7571
7572 if (mode_page)
7573 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7574
7575 length = mode_pages->hdr.length + 1;
7576 mode_pages->hdr.length = 0;
7577
7578 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7579 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7580 length);
7581
7582 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7583 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7584
7585 LEAVE;
7586 return IPR_RC_JOB_RETURN;
7587}
7588
7589/**
7590 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7591 * @ipr_cmd: ipr command struct
7592 *
7593 * This function handles the failure of a Mode Sense to the IOAFP.
7594 * Some adapters do not handle all mode pages.
7595 *
7596 * Return value:
7597 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7598 **/
7599static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7600{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007601 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007602
7603 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7604 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7605 return IPR_RC_JOB_CONTINUE;
7606 }
7607
7608 return ipr_reset_cmd_failed(ipr_cmd);
7609}
7610
7611/**
7612 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7613 * @ipr_cmd: ipr command struct
7614 *
 7615 * This function sends a mode sense to the IOA to retrieve
7616 * the IOA Advanced Function Control mode page.
7617 *
7618 * Return value:
7619 * IPR_RC_JOB_RETURN
7620 **/
7621static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7622{
7623 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7624
7625 ENTER;
7626 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7627 0x24, ioa_cfg->vpd_cbs_dma +
7628 offsetof(struct ipr_misc_cbs, mode_pages),
7629 sizeof(struct ipr_mode_pages));
7630
7631 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7632 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7633
7634 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7635
7636 LEAVE;
7637 return IPR_RC_JOB_RETURN;
7638}
7639
7640/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007641 * ipr_init_res_table - Initialize the resource table
7642 * @ipr_cmd: ipr command struct
7643 *
7644 * This function looks through the existing resource table, comparing
7645 * it with the config table. This function will take care of old/new
7646 * devices and schedule adding/removing them from the mid-layer
7647 * as appropriate.
7648 *
7649 * Return value:
7650 * IPR_RC_JOB_CONTINUE
7651 **/
7652static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7653{
7654 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7655 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007656 struct ipr_config_table_entry_wrapper cfgtew;
7657 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007658 LIST_HEAD(old_res);
7659
7660 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007661 if (ioa_cfg->sis64)
7662 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7663 else
7664 flag = ioa_cfg->u.cfg_table->hdr.flags;
7665
7666 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007667 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7668
7669 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7670 list_move_tail(&res->queue, &old_res);
7671
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007672 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007673 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007674 else
7675 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7676
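	/*
	 * Walk the new config table: reuse matching entries from old_res and
	 * allocate free entries for newly reported devices, marking them for
	 * addition to the mid-layer.
	 */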
7677 for (i = 0; i < entries; i++) {
7678 if (ioa_cfg->sis64)
7679 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7680 else
7681 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007682 found = 0;
7683
7684 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007685 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007686 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7687 found = 1;
7688 break;
7689 }
7690 }
7691
7692 if (!found) {
7693 if (list_empty(&ioa_cfg->free_res_q)) {
7694 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7695 break;
7696 }
7697
7698 found = 1;
7699 res = list_entry(ioa_cfg->free_res_q.next,
7700 struct ipr_resource_entry, queue);
7701 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007702 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007703 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007704 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7705 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007706
7707 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007708 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007709 }
7710
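	/*
	 * Anything still on old_res was not reported in the new config table:
	 * schedule removal from the mid-layer or return it to the free list.
	 */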
7711 list_for_each_entry_safe(res, temp, &old_res, queue) {
7712 if (res->sdev) {
7713 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007714 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007715 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007716 }
7717 }
7718
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007719 list_for_each_entry_safe(res, temp, &old_res, queue) {
7720 ipr_clear_res_target(res);
7721 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7722 }
7723
Brian Kingac09c342007-04-26 16:00:16 -05007724 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7725 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7726 else
7727 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007728
7729 LEAVE;
7730 return IPR_RC_JOB_CONTINUE;
7731}
7732
7733/**
7734 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7735 * @ipr_cmd: ipr command struct
7736 *
7737 * This function sends a Query IOA Configuration command
7738 * to the adapter to retrieve the IOA configuration table.
7739 *
7740 * Return value:
7741 * IPR_RC_JOB_RETURN
7742 **/
7743static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7744{
7745 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7746 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007747 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007748 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007749
7750 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007751 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7752 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007753 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7754 ucode_vpd->major_release, ucode_vpd->card_type,
7755 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7756 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7757 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7758
7759 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07007760 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007761 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7762 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007763
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007764 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007765 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007766
7767 ipr_cmd->job_step = ipr_init_res_table;
7768
7769 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7770
7771 LEAVE;
7772 return IPR_RC_JOB_RETURN;
7773}
7774
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02007775static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7776{
7777 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7778
7779 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7780 return IPR_RC_JOB_CONTINUE;
7781
7782 return ipr_reset_cmd_failed(ipr_cmd);
7783}
7784
7785static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7786 __be32 res_handle, u8 sa_code)
7787{
7788 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7789
7790 ioarcb->res_handle = res_handle;
7791 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7792 ioarcb->cmd_pkt.cdb[1] = sa_code;
7793 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7794}
7795
7796/**
7797 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7798 * action
7799 *
7800 * Return value:
 7801 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7802 **/
7803static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7804{
7805 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7806 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7807 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7808
7809 ENTER;
7810
7811 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7812
7813 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7814 ipr_build_ioa_service_action(ipr_cmd,
7815 cpu_to_be32(IPR_IOA_RES_HANDLE),
7816 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7817
7818 ioarcb->cmd_pkt.cdb[2] = 0x40;
7819
7820 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7821 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7822 IPR_SET_SUP_DEVICE_TIMEOUT);
7823
7824 LEAVE;
7825 return IPR_RC_JOB_RETURN;
7826 }
7827
7828 LEAVE;
7829 return IPR_RC_JOB_CONTINUE;
7830}
7831
Linus Torvalds1da177e2005-04-16 15:20:36 -07007832/**
7833 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7834 * @ipr_cmd: ipr command struct
7835 *
7836 * This utility function sends an inquiry to the adapter.
7837 *
7838 * Return value:
7839 * none
7840 **/
7841static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007842 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007843{
7844 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007845
7846 ENTER;
7847 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7848 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7849
7850 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7851 ioarcb->cmd_pkt.cdb[1] = flags;
7852 ioarcb->cmd_pkt.cdb[2] = page;
7853 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7854
Wayne Boyera32c0552010-02-19 13:23:36 -08007855 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007856
7857 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7858 LEAVE;
7859}
7860
7861/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06007862 * ipr_inquiry_page_supported - Is the given inquiry page supported
7863 * @page0: inquiry page 0 buffer
7864 * @page: page code.
7865 *
7866 * This function determines if the specified inquiry page is supported.
7867 *
7868 * Return value:
7869 * 1 if page is supported / 0 if not
7870 **/
7871static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7872{
7873 int i;
7874
7875 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7876 if (page0->page[i] == page)
7877 return 1;
7878
7879 return 0;
7880}
7881
7882/**
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02007883 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7884 * @ipr_cmd: ipr command struct
7885 *
7886 * This function sends a Page 0xC4 inquiry to the adapter
 7887 * to retrieve the adapter's cache capabilities.
7888 *
7889 * Return value:
7890 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7891 **/
7892static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
7893{
7894 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7895 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7896 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7897
7898 ENTER;
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02007899 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02007900 memset(pageC4, 0, sizeof(*pageC4));
7901
7902 if (ipr_inquiry_page_supported(page0, 0xC4)) {
7903 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
7904 (ioa_cfg->vpd_cbs_dma
7905 + offsetof(struct ipr_misc_cbs,
7906 pageC4_data)),
7907 sizeof(struct ipr_inquiry_pageC4));
7908 return IPR_RC_JOB_RETURN;
7909 }
7910
7911 LEAVE;
7912 return IPR_RC_JOB_CONTINUE;
7913}
7914
7915/**
Brian Kingac09c342007-04-26 16:00:16 -05007916 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7917 * @ipr_cmd: ipr command struct
7918 *
7919 * This function sends a Page 0xD0 inquiry to the adapter
7920 * to retrieve adapter capabilities.
7921 *
7922 * Return value:
7923 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7924 **/
7925static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7926{
7927 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7928 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7929 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7930
7931 ENTER;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02007932 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
Brian Kingac09c342007-04-26 16:00:16 -05007933 memset(cap, 0, sizeof(*cap));
7934
7935 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7936 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7937 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7938 sizeof(struct ipr_inquiry_cap));
7939 return IPR_RC_JOB_RETURN;
7940 }
7941
7942 LEAVE;
7943 return IPR_RC_JOB_CONTINUE;
7944}
7945
7946/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007947 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7948 * @ipr_cmd: ipr command struct
7949 *
7950 * This function sends a Page 3 inquiry to the adapter
7951 * to retrieve software VPD information.
7952 *
7953 * Return value:
7954 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7955 **/
7956static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7957{
7958 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007959
7960 ENTER;
7961
Brian Kingac09c342007-04-26 16:00:16 -05007962 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007963
7964 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7965 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7966 sizeof(struct ipr_inquiry_page3));
7967
7968 LEAVE;
7969 return IPR_RC_JOB_RETURN;
7970}
7971
7972/**
7973 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7974 * @ipr_cmd: ipr command struct
7975 *
7976 * This function sends a Page 0 inquiry to the adapter
7977 * to retrieve supported inquiry pages.
7978 *
7979 * Return value:
7980 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7981 **/
7982static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7983{
7984 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007985 char type[5];
7986
7987 ENTER;
7988
7989 /* Grab the type out of the VPD and store it away */
7990 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7991 type[4] = '\0';
7992 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7993
Brian Kingf688f962014-12-02 12:47:37 -06007994 if (ipr_invalid_adapter(ioa_cfg)) {
7995 dev_err(&ioa_cfg->pdev->dev,
7996 "Adapter not supported in this hardware configuration.\n");
7997
7998 if (!ipr_testmode) {
7999 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8000 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8001 list_add_tail(&ipr_cmd->queue,
8002 &ioa_cfg->hrrq->hrrq_free_q);
8003 return IPR_RC_JOB_RETURN;
8004 }
8005 }
8006
brking@us.ibm.com62275042005-11-01 17:01:14 -06008007 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008008
brking@us.ibm.com62275042005-11-01 17:01:14 -06008009 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8010 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8011 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008012
8013 LEAVE;
8014 return IPR_RC_JOB_RETURN;
8015}
8016
8017/**
8018 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8019 * @ipr_cmd: ipr command struct
8020 *
8021 * This function sends a standard inquiry to the adapter.
8022 *
8023 * Return value:
8024 * IPR_RC_JOB_RETURN
8025 **/
8026static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8027{
8028 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8029
8030 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008031 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008032
8033 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8034 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8035 sizeof(struct ipr_ioa_vpd));
8036
8037 LEAVE;
8038 return IPR_RC_JOB_RETURN;
8039}
8040
8041/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008042 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008043 * @ipr_cmd: ipr command struct
8044 *
 8045 * This function sends an Identify Host Request Response Queue
8046 * command to establish the HRRQ with the adapter.
8047 *
8048 * Return value:
8049 * IPR_RC_JOB_RETURN
8050 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08008051static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008052{
8053 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8054 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008055 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008056
8057 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008058 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Brian King87adbe02016-09-16 16:51:37 -05008059 if (ioa_cfg->identify_hrrq_index == 0)
8060 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008061
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008062 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8063 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07008064
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008065 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8066 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008067
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008068 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8069 if (ioa_cfg->sis64)
8070 ioarcb->cmd_pkt.cdb[1] = 0x1;
8071
8072 if (ioa_cfg->nvectors == 1)
8073 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8074 else
8075 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8076
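		/*
		 * CDB bytes 2-5 carry the low 32 bits of the HRRQ DMA address
		 * (MSB first) and bytes 7-8 the queue size in bytes; bytes 10-13
		 * below add the upper 32 bits on SIS-64 adapters.
		 */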
8077 ioarcb->cmd_pkt.cdb[2] =
8078 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8079 ioarcb->cmd_pkt.cdb[3] =
8080 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8081 ioarcb->cmd_pkt.cdb[4] =
8082 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8083 ioarcb->cmd_pkt.cdb[5] =
8084 ((u64) hrrq->host_rrq_dma) & 0xff;
8085 ioarcb->cmd_pkt.cdb[7] =
8086 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8087 ioarcb->cmd_pkt.cdb[8] =
8088 (sizeof(u32) * hrrq->size) & 0xff;
8089
8090 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008091 ioarcb->cmd_pkt.cdb[9] =
8092 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008093
8094 if (ioa_cfg->sis64) {
8095 ioarcb->cmd_pkt.cdb[10] =
8096 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8097 ioarcb->cmd_pkt.cdb[11] =
8098 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8099 ioarcb->cmd_pkt.cdb[12] =
8100 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8101 ioarcb->cmd_pkt.cdb[13] =
8102 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8103 }
8104
8105 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008106 ioarcb->cmd_pkt.cdb[14] =
8107 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008108
8109 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8110 IPR_INTERNAL_TIMEOUT);
8111
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008112 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8113 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008114
8115 LEAVE;
8116 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08008117 }
8118
Linus Torvalds1da177e2005-04-16 15:20:36 -07008119 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008120 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008121}
8122
8123/**
8124 * ipr_reset_timer_done - Adapter reset timer function
8125 * @ipr_cmd: ipr command struct
8126 *
8127 * Description: This function is used in adapter reset processing
8128 * for timing events. If the reset_cmd pointer in the IOA
8129 * config struct is not this adapter's we are doing nested
8130 * resets and fail_all_ops will take care of freeing the
8131 * command block.
8132 *
8133 * Return value:
8134 * none
8135 **/
8136static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8137{
8138 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8139 unsigned long lock_flags = 0;
8140
8141 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8142
8143 if (ioa_cfg->reset_cmd == ipr_cmd) {
8144 list_del(&ipr_cmd->queue);
8145 ipr_cmd->done(ipr_cmd);
8146 }
8147
8148 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8149}
8150
8151/**
8152 * ipr_reset_start_timer - Start a timer for adapter reset job
8153 * @ipr_cmd: ipr command struct
8154 * @timeout: timeout value
8155 *
8156 * Description: This function is used in adapter reset processing
8157 * for timing events. If the reset_cmd pointer in the IOA
8158 * config struct is not this adapter's we are doing nested
8159 * resets and fail_all_ops will take care of freeing the
8160 * command block.
8161 *
8162 * Return value:
8163 * none
8164 **/
8165static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8166 unsigned long timeout)
8167{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008168
8169 ENTER;
8170 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008171 ipr_cmd->done = ipr_reset_ioa_job;
8172
8173 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8174 ipr_cmd->timer.expires = jiffies + timeout;
8175 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8176 add_timer(&ipr_cmd->timer);
8177}
8178
8179/**
8180 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8181 * @ioa_cfg: ioa cfg struct
8182 *
8183 * Return value:
8184 * nothing
8185 **/
8186static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8187{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008188 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008189
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008190 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008191 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008192 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8193
8194 /* Initialize Host RRQ pointers */
8195 hrrq->hrrq_start = hrrq->host_rrq;
8196 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8197 hrrq->hrrq_curr = hrrq->hrrq_start;
8198 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008199 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008200 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008201 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008202
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008203 ioa_cfg->identify_hrrq_index = 0;
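	/* When multiple HRRQs are configured, queue 0 is sized for internal
	 * commands (see ipr_alloc_cmd_blks), so start the I/O round-robin
	 * index at 1. */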
8204 if (ioa_cfg->hrrq_num == 1)
8205 atomic_set(&ioa_cfg->hrrq_index, 0);
8206 else
8207 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008208
8209 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008210 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008211}
8212
8213/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008214 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8215 * @ipr_cmd: ipr command struct
8216 *
8217 * Return value:
8218 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8219 **/
8220static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8221{
8222 unsigned long stage, stage_time;
8223 u32 feedback;
8224 volatile u32 int_reg;
8225 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8226 u64 maskval = 0;
8227
8228 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8229 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8230 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8231
8232 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8233
8234 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07008235 if (stage_time == 0)
8236 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8237 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08008238 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8239 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8240 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8241
8242 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8243 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8244 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8245 stage_time = ioa_cfg->transop_timeout;
8246 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8247 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07008248 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8249 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8250 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8251 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8252 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8253 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8254 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8255 return IPR_RC_JOB_CONTINUE;
8256 }
Wayne Boyer214777b2010-02-19 13:24:26 -08008257 }
8258
8259 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8260 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8261 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8262 ipr_cmd->done = ipr_reset_ioa_job;
8263 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008264
8265 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08008266
8267 return IPR_RC_JOB_RETURN;
8268}
8269
8270/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008271 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8272 * @ipr_cmd: ipr command struct
8273 *
8274 * This function reinitializes some control blocks and
8275 * enables destructive diagnostics on the adapter.
8276 *
8277 * Return value:
8278 * IPR_RC_JOB_RETURN
8279 **/
8280static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8281{
8282 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8283 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07008284 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008285 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008286
8287 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08008288 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008289 ipr_init_ioa_mem(ioa_cfg);
8290
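	/* Allow each HRRQ to service interrupts again before the adapter's
	 * interrupts are unmasked below. */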
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008291 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8292 spin_lock(&ioa_cfg->hrrq[i]._lock);
8293 ioa_cfg->hrrq[i].allow_interrupts = 1;
8294 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8295 }
8296 wmb();
Wayne Boyer8701f182010-06-04 10:26:50 -07008297 if (ioa_cfg->sis64) {
8298 /* Set the adapter to the correct endian mode. */
8299 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8300 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8301 }
8302
Wayne Boyer7be96902010-05-10 09:14:07 -07008303 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008304
8305 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8306 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08008307 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008308 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8309 return IPR_RC_JOB_CONTINUE;
8310 }
8311
8312 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08008313 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008314
Wayne Boyer7be96902010-05-10 09:14:07 -07008315 if (ioa_cfg->sis64) {
8316 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8317 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8318 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8319 } else
8320 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08008321
Linus Torvalds1da177e2005-04-16 15:20:36 -07008322 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8323
8324 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8325
Wayne Boyer214777b2010-02-19 13:24:26 -08008326 if (ioa_cfg->sis64) {
8327 ipr_cmd->job_step = ipr_reset_next_stage;
8328 return IPR_RC_JOB_CONTINUE;
8329 }
8330
Linus Torvalds1da177e2005-04-16 15:20:36 -07008331 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
Brian King5469cb52007-03-29 12:42:40 -05008332 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008333 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8334 ipr_cmd->done = ipr_reset_ioa_job;
8335 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008336 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008337
8338 LEAVE;
8339 return IPR_RC_JOB_RETURN;
8340}
8341
8342/**
8343 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8344 * @ipr_cmd: ipr command struct
8345 *
8346 * This function is invoked when an adapter dump has run out
8347 * of processing time.
8348 *
8349 * Return value:
8350 * IPR_RC_JOB_CONTINUE
8351 **/
8352static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8353{
8354 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8355
8356 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05008357 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8358 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008359 ioa_cfg->sdt_state = ABORT_DUMP;
8360
Brian King4c647e92011-10-15 09:08:56 -05008361 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008362 ipr_cmd->job_step = ipr_reset_alert;
8363
8364 return IPR_RC_JOB_CONTINUE;
8365}
8366
8367/**
8368 * ipr_unit_check_no_data - Log a unit check/no data error log
8369 * @ioa_cfg: ioa config struct
8370 *
8371 * Logs an error indicating the adapter unit checked, but for some
8372 * reason, we were unable to fetch the unit check buffer.
8373 *
8374 * Return value:
8375 * nothing
8376 **/
8377static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8378{
8379 ioa_cfg->errors_logged++;
8380 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8381}
8382
8383/**
8384 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8385 * @ioa_cfg: ioa config struct
8386 *
8387 * Fetches the unit check buffer from the adapter by clocking the data
8388 * through the mailbox register.
8389 *
8390 * Return value:
8391 * nothing
8392 **/
8393static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8394{
8395 unsigned long mailbox;
8396 struct ipr_hostrcb *hostrcb;
8397 struct ipr_uc_sdt sdt;
8398 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05008399 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008400
8401 mailbox = readl(ioa_cfg->ioa_mailbox);
8402
Wayne Boyerdcbad002010-02-19 13:24:14 -08008403 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008404 ipr_unit_check_no_data(ioa_cfg);
8405 return;
8406 }
8407
8408 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8409 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8410 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8411
Wayne Boyerdcbad002010-02-19 13:24:14 -08008412 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8413 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8414 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008415 ipr_unit_check_no_data(ioa_cfg);
8416 return;
8417 }
8418
8419 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08008420 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8421 length = be32_to_cpu(sdt.entry[0].end_token);
8422 else
8423 length = (be32_to_cpu(sdt.entry[0].end_token) -
8424 be32_to_cpu(sdt.entry[0].start_token)) &
8425 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008426
8427 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8428 struct ipr_hostrcb, queue);
Brian Kingafc3f832016-08-24 12:56:51 -05008429 list_del_init(&hostrcb->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008430 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8431
8432 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08008433 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07008434 (__be32 *)&hostrcb->hcam,
8435 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8436
Brian King65f56472007-04-26 16:00:12 -05008437 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008438 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08008439 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05008440 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8441 ioa_cfg->sdt_state == GET_DUMP)
8442 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8443 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008444 ipr_unit_check_no_data(ioa_cfg);
8445
8446 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8447}
8448
8449/**
Wayne Boyer110def82010-11-04 09:36:16 -07008450 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8451 * @ipr_cmd: ipr command struct
8452 *
8453 * Description: This function will call to get the unit check buffer.
8454 *
8455 * Return value:
8456 * IPR_RC_JOB_RETURN
8457 **/
8458static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8459{
8460 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8461
8462 ENTER;
8463 ioa_cfg->ioa_unit_checked = 0;
8464 ipr_get_unit_check_buffer(ioa_cfg);
8465 ipr_cmd->job_step = ipr_reset_alert;
8466 ipr_reset_start_timer(ipr_cmd, 0);
8467
8468 LEAVE;
8469 return IPR_RC_JOB_RETURN;
8470}
8471
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008472static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8473{
8474 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8475
8476 ENTER;
8477
8478 if (ioa_cfg->sdt_state != GET_DUMP)
8479 return IPR_RC_JOB_RETURN;
8480
8481 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8482 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8483 IPR_PCII_MAILBOX_STABLE)) {
8484
8485 if (!ipr_cmd->u.time_left)
8486 dev_err(&ioa_cfg->pdev->dev,
8487 "Timed out waiting for Mailbox register.\n");
8488
8489 ioa_cfg->sdt_state = READ_DUMP;
8490 ioa_cfg->dump_timeout = 0;
8491 if (ioa_cfg->sis64)
8492 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8493 else
8494 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8495 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8496 schedule_work(&ioa_cfg->work_q);
8497
8498 } else {
8499 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8500 ipr_reset_start_timer(ipr_cmd,
8501 IPR_CHECK_FOR_RESET_TIMEOUT);
8502 }
8503
8504 LEAVE;
8505 return IPR_RC_JOB_RETURN;
8506}
8507
Wayne Boyer110def82010-11-04 09:36:16 -07008508/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008509 * ipr_reset_restore_cfg_space - Restore PCI config space.
8510 * @ipr_cmd: ipr command struct
8511 *
8512 * Description: This function restores the saved PCI config space of
8513 * the adapter, fails all outstanding ops back to the callers, and
8514 * fetches the dump/unit check if applicable to this reset.
8515 *
8516 * Return value:
8517 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8518 **/
8519static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8520{
8521 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07008522 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008523
8524 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02008525 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06008526 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008527
8528 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008529 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008530 return IPR_RC_JOB_CONTINUE;
8531 }
8532
8533 ipr_fail_all_ops(ioa_cfg);
8534
Wayne Boyer8701f182010-06-04 10:26:50 -07008535 if (ioa_cfg->sis64) {
8536 /* Set the adapter to the correct endian mode. */
8537 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8538 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8539 }
8540
Linus Torvalds1da177e2005-04-16 15:20:36 -07008541 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07008542 if (ioa_cfg->sis64) {
8543 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8544 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8545 return IPR_RC_JOB_RETURN;
8546 } else {
8547 ioa_cfg->ioa_unit_checked = 0;
8548 ipr_get_unit_check_buffer(ioa_cfg);
8549 ipr_cmd->job_step = ipr_reset_alert;
8550 ipr_reset_start_timer(ipr_cmd, 0);
8551 return IPR_RC_JOB_RETURN;
8552 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008553 }
8554
8555 if (ioa_cfg->in_ioa_bringdown) {
8556 ipr_cmd->job_step = ipr_ioa_bringdown_done;
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008557 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8558 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8559 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008560 } else {
8561 ipr_cmd->job_step = ipr_reset_enable_ioa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008562 }
8563
Wayne Boyer438b0332010-05-10 09:13:00 -07008564 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008565 return IPR_RC_JOB_CONTINUE;
8566}
8567
8568/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008569 * ipr_reset_bist_done - BIST has completed on the adapter.
8570 * @ipr_cmd: ipr command struct
8571 *
8572 * Description: Unblock config space and resume the reset process.
8573 *
8574 * Return value:
8575 * IPR_RC_JOB_CONTINUE
8576 **/
8577static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8578{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8580
Brian Kinge619e1a2007-01-23 11:25:37 -06008581 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008582 if (ioa_cfg->cfg_locked)
8583 pci_cfg_access_unlock(ioa_cfg->pdev);
8584 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008585 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8586 LEAVE;
8587 return IPR_RC_JOB_CONTINUE;
8588}
8589
8590/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008591 * ipr_reset_start_bist - Run BIST on the adapter.
8592 * @ipr_cmd: ipr command struct
8593 *
8594 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8595 *
8596 * Return value:
8597 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8598 **/
8599static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8600{
8601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008602 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008603
8604 ENTER;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008605 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8606 writel(IPR_UPROCI_SIS64_START_BIST,
8607 ioa_cfg->regs.set_uproc_interrupt_reg32);
8608 else
8609 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8610
8611 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008612 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008613 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8614 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008615 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008616 if (ioa_cfg->cfg_locked)
8617 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8618 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008619 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8620 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008621 }
8622
8623 LEAVE;
8624 return rc;
8625}
8626
8627/**
Brian King463fc692007-05-07 17:09:05 -05008628 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8629 * @ipr_cmd: ipr command struct
8630 *
8631 * Description: This clears PCI reset to the adapter and delays two seconds.
8632 *
8633 * Return value:
8634 * IPR_RC_JOB_RETURN
8635 **/
8636static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8637{
8638 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008639 ipr_cmd->job_step = ipr_reset_bist_done;
8640 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8641 LEAVE;
8642 return IPR_RC_JOB_RETURN;
8643}
8644
8645/**
Brian King2796ca52015-03-26 11:23:52 -05008646 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8647 * @work: work struct
8648 *
8649	 * Description: This pulses a PCIe warm reset to the slot.
8650 *
8651 **/
8652static void ipr_reset_reset_work(struct work_struct *work)
8653{
8654 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8655 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8656 struct pci_dev *pdev = ioa_cfg->pdev;
8657 unsigned long lock_flags = 0;
8658
8659 ENTER;
8660 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8661 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8662 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8663
8664 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8665 if (ioa_cfg->reset_cmd == ipr_cmd)
8666 ipr_reset_ioa_job(ipr_cmd);
8667 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8668 LEAVE;
8669}
8670
8671/**
Brian King463fc692007-05-07 17:09:05 -05008672 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8673 * @ipr_cmd: ipr command struct
8674 *
8675 * Description: This asserts PCI reset to the adapter.
8676 *
8677 * Return value:
8678 * IPR_RC_JOB_RETURN
8679 **/
8680static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8681{
8682 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Brian King463fc692007-05-07 17:09:05 -05008683
8684 ENTER;
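	/* Asserting and deasserting PCI reset needs an msleep in between
	 * (see ipr_reset_reset_work), which cannot be done here under the
	 * host lock, so the pulse is deferred to the reset work queue. */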
Brian King2796ca52015-03-26 11:23:52 -05008685 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8686 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
Brian King463fc692007-05-07 17:09:05 -05008687 ipr_cmd->job_step = ipr_reset_slot_reset_done;
Brian King463fc692007-05-07 17:09:05 -05008688 LEAVE;
8689 return IPR_RC_JOB_RETURN;
8690}
8691
8692/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008693 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8694 * @ipr_cmd: ipr command struct
8695 *
8696 * Description: This attempts to block config access to the IOA.
8697 *
8698 * Return value:
8699 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8700 **/
8701static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8702{
8703 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8704 int rc = IPR_RC_JOB_CONTINUE;
8705
8706 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8707 ioa_cfg->cfg_locked = 1;
8708 ipr_cmd->job_step = ioa_cfg->reset;
8709 } else {
8710 if (ipr_cmd->u.time_left) {
8711 rc = IPR_RC_JOB_RETURN;
8712 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8713 ipr_reset_start_timer(ipr_cmd,
8714 IPR_CHECK_FOR_RESET_TIMEOUT);
8715 } else {
8716 ipr_cmd->job_step = ioa_cfg->reset;
8717 dev_err(&ioa_cfg->pdev->dev,
8718 "Timed out waiting to lock config access. Resetting anyway.\n");
8719 }
8720 }
8721
8722 return rc;
8723}
8724
8725/**
8726 * ipr_reset_block_config_access - Block config access to the IOA
8727 * @ipr_cmd: ipr command struct
8728 *
8729 * Description: This attempts to block config access to the IOA
8730 *
8731 * Return value:
8732 * IPR_RC_JOB_CONTINUE
8733 **/
8734static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8735{
8736 ipr_cmd->ioa_cfg->cfg_locked = 0;
8737 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8738 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8739 return IPR_RC_JOB_CONTINUE;
8740}
8741
8742/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008743 * ipr_reset_allowed - Query whether or not IOA can be reset
8744 * @ioa_cfg: ioa config struct
8745 *
8746 * Return value:
8747 * 0 if reset not allowed / non-zero if reset is allowed
8748 **/
8749static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8750{
8751 volatile u32 temp_reg;
8752
8753 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8754 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8755}
8756
8757/**
8758 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8759 * @ipr_cmd: ipr command struct
8760 *
8761 * Description: This function waits for adapter permission to run BIST,
8762 * then runs BIST. If the adapter does not give permission after a
8763 * reasonable time, we will reset the adapter anyway. The impact of
8764	 * resetting the adapter without warning it is the risk of
8765 * losing the persistent error log on the adapter. If the adapter is
8766 * reset while it is writing to the flash on the adapter, the flash
8767 * segment will have bad ECC and be zeroed.
8768 *
8769 * Return value:
8770 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8771 **/
8772static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8773{
8774 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8775 int rc = IPR_RC_JOB_RETURN;
8776
8777 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8778 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8779 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8780 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008781 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008782 rc = IPR_RC_JOB_CONTINUE;
8783 }
8784
8785 return rc;
8786}
8787
8788/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008789 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008790 * @ipr_cmd: ipr command struct
8791 *
8792 * Description: This function alerts the adapter that it will be reset.
8793 * If memory space is not currently enabled, proceed directly
8794 * to running BIST on the adapter. The timer must always be started
8795 * so we guarantee we do not run BIST from ipr_isr.
8796 *
8797 * Return value:
8798 * IPR_RC_JOB_RETURN
8799 **/
8800static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8801{
8802 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8803 u16 cmd_reg;
8804 int rc;
8805
8806 ENTER;
8807 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8808
8809 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8810 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008811 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008812 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8813 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008814 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008815 }
8816
8817 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8818 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8819
8820 LEAVE;
8821 return IPR_RC_JOB_RETURN;
8822}
8823
8824/**
Brian King4fdd7c72015-03-26 11:23:50 -05008825 * ipr_reset_quiesce_done - Complete IOA disconnect
8826 * @ipr_cmd: ipr command struct
8827 *
8828 * Description: Freeze the adapter to complete quiesce processing
8829 *
8830 * Return value:
8831 * IPR_RC_JOB_CONTINUE
8832 **/
8833static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8834{
8835 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8836
8837 ENTER;
8838 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8839 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8840 LEAVE;
8841 return IPR_RC_JOB_CONTINUE;
8842}
8843
8844/**
8845 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8846 * @ipr_cmd: ipr command struct
8847 *
8848 * Description: Ensure nothing is outstanding to the IOA and
8849 * proceed with IOA disconnect. Otherwise reset the IOA.
8850 *
8851 * Return value:
8852 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8853 **/
8854static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8855{
8856 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8857 struct ipr_cmnd *loop_cmd;
8858 struct ipr_hrr_queue *hrrq;
8859 int rc = IPR_RC_JOB_CONTINUE;
8860 int count = 0;
8861
8862 ENTER;
8863 ipr_cmd->job_step = ipr_reset_quiesce_done;
8864
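	/* If any command is still outstanding on any HRRQ, initiate a full
	 * adapter reset instead of quiescing. */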
8865 for_each_hrrq(hrrq, ioa_cfg) {
8866 spin_lock(&hrrq->_lock);
8867 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8868 count++;
8869 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8870 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8871 rc = IPR_RC_JOB_RETURN;
8872 break;
8873 }
8874 spin_unlock(&hrrq->_lock);
8875
8876 if (count)
8877 break;
8878 }
8879
8880 LEAVE;
8881 return rc;
8882}
8883
8884/**
8885 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8886 * @ipr_cmd: ipr command struct
8887 *
8888	 * Description: Cancel any outstanding HCAMs to the IOA.
8889 *
8890 * Return value:
8891 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8892 **/
8893static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8894{
8895 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8896 int rc = IPR_RC_JOB_CONTINUE;
8897 struct ipr_cmd_pkt *cmd_pkt;
8898 struct ipr_cmnd *hcam_cmd;
8899 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8900
8901 ENTER;
8902 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8903
8904 if (!hrrq->ioa_is_dead) {
8905 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8906 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8907 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8908 continue;
8909
8910 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8911 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8912 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8913 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8914 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8915 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
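				/* 64-bit address of the HCAM IOARCB being
				 * cancelled: upper 32 bits in CDB bytes 10-13,
				 * lower 32 bits in CDB bytes 2-5. */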
8916 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8917 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8918 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8919 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8920 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8921 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8922 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8923 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8924
8925 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8926 IPR_CANCEL_TIMEOUT);
8927
8928 rc = IPR_RC_JOB_RETURN;
8929 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8930 break;
8931 }
8932 }
8933 } else
8934 ipr_cmd->job_step = ipr_reset_alert;
8935
8936 LEAVE;
8937 return rc;
8938}
8939
8940/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008941 * ipr_reset_ucode_download_done - Microcode download completion
8942 * @ipr_cmd: ipr command struct
8943 *
8944 * Description: This function unmaps the microcode download buffer.
8945 *
8946 * Return value:
8947 * IPR_RC_JOB_CONTINUE
8948 **/
8949static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8950{
8951 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8952 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8953
Anton Blanchardd73341b2014-10-30 17:27:08 -05008954 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008955 sglist->num_sg, DMA_TO_DEVICE);
8956
8957 ipr_cmd->job_step = ipr_reset_alert;
8958 return IPR_RC_JOB_CONTINUE;
8959}
8960
8961/**
8962 * ipr_reset_ucode_download - Download microcode to the adapter
8963 * @ipr_cmd: ipr command struct
8964 *
8965 * Description: This function checks to see if it there is microcode
8966 * to download to the adapter. If there is, a download is performed.
8967 *
8968 * Return value:
8969 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8970 **/
8971static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8972{
8973 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8974 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8975
8976 ENTER;
8977 ipr_cmd->job_step = ipr_reset_alert;
8978
8979 if (!sglist)
8980 return IPR_RC_JOB_CONTINUE;
8981
8982 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8983 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8984 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8985 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8986 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8987 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8988 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8989
Wayne Boyera32c0552010-02-19 13:23:36 -08008990 if (ioa_cfg->sis64)
8991 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8992 else
8993 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008994 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8995
8996 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8997 IPR_WRITE_BUFFER_TIMEOUT);
8998
8999 LEAVE;
9000 return IPR_RC_JOB_RETURN;
9001}
9002
9003/**
9004 * ipr_reset_shutdown_ioa - Shutdown the adapter
9005 * @ipr_cmd: ipr command struct
9006 *
9007 * Description: This function issues an adapter shutdown of the
9008 * specified type to the specified adapter as part of the
9009 * adapter reset job.
9010 *
9011 * Return value:
9012 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9013 **/
9014static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9015{
9016 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9017 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9018 unsigned long timeout;
9019 int rc = IPR_RC_JOB_CONTINUE;
9020
9021 ENTER;
Brian King4fdd7c72015-03-26 11:23:50 -05009022 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9023 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9024 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009025 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009026 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9027 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9028 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9029 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9030
Brian Kingac09c342007-04-26 16:00:16 -05009031 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9032 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009033 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9034 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05009035 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9036 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009037 else
Brian Kingac09c342007-04-26 16:00:16 -05009038 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009039
9040 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9041
9042 rc = IPR_RC_JOB_RETURN;
9043 ipr_cmd->job_step = ipr_reset_ucode_download;
9044 } else
9045 ipr_cmd->job_step = ipr_reset_alert;
9046
9047 LEAVE;
9048 return rc;
9049}
9050
9051/**
9052 * ipr_reset_ioa_job - Adapter reset job
9053 * @ipr_cmd: ipr command struct
9054 *
9055 * Description: This function is the job router for the adapter reset job.
9056 *
9057 * Return value:
9058 * none
9059 **/
9060static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9061{
9062 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009063 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9064
9065 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07009066 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009067
9068 if (ioa_cfg->reset_cmd != ipr_cmd) {
9069 /*
9070 * We are doing nested adapter resets and this is
9071 * not the current reset job.
9072 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009073 list_add_tail(&ipr_cmd->queue,
9074 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009075 return;
9076 }
9077
9078 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009079 rc = ipr_cmd->job_step_failed(ipr_cmd);
9080 if (rc == IPR_RC_JOB_RETURN)
9081 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009082 }
9083
9084 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009085 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009086 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009087 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009088}
9089
9090/**
9091 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9092 * @ioa_cfg: ioa config struct
9093 * @job_step: first job step of reset job
9094 * @shutdown_type: shutdown type
9095 *
9096 * Description: This function will initiate the reset of the given adapter
9097 * starting at the selected job step.
9098 * If the caller needs to wait on the completion of the reset,
9099 * the caller must sleep on the reset_wait_q.
9100 *
9101 * Return value:
9102 * none
9103 **/
9104static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9105 int (*job_step) (struct ipr_cmnd *),
9106 enum ipr_shutdown_type shutdown_type)
9107{
9108 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009109 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009110
9111 ioa_cfg->in_reset_reload = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009112 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9113 spin_lock(&ioa_cfg->hrrq[i]._lock);
9114 ioa_cfg->hrrq[i].allow_cmds = 0;
9115 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9116 }
9117 wmb();
Brian Kingbfae7822013-01-30 23:45:08 -06009118 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
9119 scsi_block_requests(ioa_cfg->host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009120
9121 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9122 ioa_cfg->reset_cmd = ipr_cmd;
9123 ipr_cmd->job_step = job_step;
9124 ipr_cmd->u.shutdown_type = shutdown_type;
9125
9126 ipr_reset_ioa_job(ipr_cmd);
9127}
9128
9129/**
9130 * ipr_initiate_ioa_reset - Initiate an adapter reset
9131 * @ioa_cfg: ioa config struct
9132 * @shutdown_type: shutdown type
9133 *
9134 * Description: This function will initiate the reset of the given adapter.
9135 * If the caller needs to wait on the completion of the reset,
9136 * the caller must sleep on the reset_wait_q.
9137 *
9138 * Return value:
9139 * none
9140 **/
9141static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9142 enum ipr_shutdown_type shutdown_type)
9143{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009144 int i;
9145
9146 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009147 return;
9148
Brian King41e9a692011-09-21 08:51:11 -05009149 if (ioa_cfg->in_reset_reload) {
9150 if (ioa_cfg->sdt_state == GET_DUMP)
9151 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9152 else if (ioa_cfg->sdt_state == READ_DUMP)
9153 ioa_cfg->sdt_state = ABORT_DUMP;
9154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009155
9156 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9157 dev_err(&ioa_cfg->pdev->dev,
9158 "IOA taken offline - error recovery failed\n");
9159
9160 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009161 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9162 spin_lock(&ioa_cfg->hrrq[i]._lock);
9163 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9164 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9165 }
9166 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009167
9168 if (ioa_cfg->in_ioa_bringdown) {
9169 ioa_cfg->reset_cmd = NULL;
9170 ioa_cfg->in_reset_reload = 0;
9171 ipr_fail_all_ops(ioa_cfg);
9172 wake_up_all(&ioa_cfg->reset_wait_q);
9173
Brian Kingbfae7822013-01-30 23:45:08 -06009174 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9175 spin_unlock_irq(ioa_cfg->host->host_lock);
9176 scsi_unblock_requests(ioa_cfg->host);
9177 spin_lock_irq(ioa_cfg->host->host_lock);
9178 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009179 return;
9180 } else {
9181 ioa_cfg->in_ioa_bringdown = 1;
9182 shutdown_type = IPR_SHUTDOWN_NONE;
9183 }
9184 }
9185
9186 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9187 shutdown_type);
9188}
9189
9190/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009191 * ipr_reset_freeze - Hold off all I/O activity
9192 * @ipr_cmd: ipr command struct
9193 *
9194 * Description: If the PCI slot is frozen, hold off all I/O
9195 * activity; then, as soon as the slot is available again,
9196 * initiate an adapter reset.
9197 */
9198static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9199{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009200 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9201 int i;
9202
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009203 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009204 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9205 spin_lock(&ioa_cfg->hrrq[i]._lock);
9206 ioa_cfg->hrrq[i].allow_interrupts = 0;
9207 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9208 }
9209 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009210 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009211 ipr_cmd->done = ipr_reset_ioa_job;
9212 return IPR_RC_JOB_RETURN;
9213}
9214
9215/**
Brian King6270e592014-01-21 12:16:41 -06009216 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9217 * @pdev: PCI device struct
9218 *
9219 * Description: This routine is called to tell us that the MMIO
9220 * access to the IOA has been restored
9221 */
9222static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9223{
9224 unsigned long flags = 0;
9225 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9226
9227 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9228 if (!ioa_cfg->probe_done)
9229 pci_save_state(pdev);
9230 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9231 return PCI_ERS_RESULT_NEED_RESET;
9232}
9233
9234/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009235 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9236 * @pdev: PCI device struct
9237 *
9238 * Description: This routine is called to tell us that the PCI bus
9239 * is down. Can't do anything here, except put the device driver
9240 * into a holding pattern, waiting for the PCI bus to come back.
9241 */
9242static void ipr_pci_frozen(struct pci_dev *pdev)
9243{
9244 unsigned long flags = 0;
9245 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9246
9247 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009248 if (ioa_cfg->probe_done)
9249 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009250 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9251}
9252
9253/**
9254 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9255 * @pdev: PCI device struct
9256 *
9257 * Description: This routine is called by the pci error recovery
9258 * code after the PCI slot has been reset, just before we
9259 * should resume normal operations.
9260 */
9261static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9262{
9263 unsigned long flags = 0;
9264 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9265
9266 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009267 if (ioa_cfg->probe_done) {
9268 if (ioa_cfg->needs_warm_reset)
9269 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9270 else
9271 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9272 IPR_SHUTDOWN_NONE);
9273 } else
9274 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9276 return PCI_ERS_RESULT_RECOVERED;
9277}
9278
9279/**
9280 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9281 * @pdev: PCI device struct
9282 *
9283 * Description: This routine is called when the PCI bus has
9284 * permanently failed.
9285 */
9286static void ipr_pci_perm_failure(struct pci_dev *pdev)
9287{
9288 unsigned long flags = 0;
9289 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009290 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009291
9292 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009293 if (ioa_cfg->probe_done) {
9294 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9295 ioa_cfg->sdt_state = ABORT_DUMP;
9296 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9297 ioa_cfg->in_ioa_bringdown = 1;
9298 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9299 spin_lock(&ioa_cfg->hrrq[i]._lock);
9300 ioa_cfg->hrrq[i].allow_cmds = 0;
9301 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9302 }
9303 wmb();
9304 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9305 } else
9306 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9308}
9309
9310/**
9311 * ipr_pci_error_detected - Called when a PCI error is detected.
9312 * @pdev: PCI device struct
9313 * @state: PCI channel state
9314 *
9315 * Description: Called when a PCI error is detected.
9316 *
9317 * Return value:
9318 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9319 */
9320static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9321 pci_channel_state_t state)
9322{
9323 switch (state) {
9324 case pci_channel_io_frozen:
9325 ipr_pci_frozen(pdev);
Brian King6270e592014-01-21 12:16:41 -06009326 return PCI_ERS_RESULT_CAN_RECOVER;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009327 case pci_channel_io_perm_failure:
9328 ipr_pci_perm_failure(pdev);
9329 return PCI_ERS_RESULT_DISCONNECT;
9331 default:
9332 break;
9333 }
9334 return PCI_ERS_RESULT_NEED_RESET;
9335}
9336
9337/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009338 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9339 * @ioa_cfg: ioa cfg struct
9340 *
9341	 * Description: This is the second phase of adapter initialization.
9342	 * This function takes care of initializing the adapter to the point
9343	 * where it can accept new commands.
9344	 *
9345 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02009346 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009347 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009348static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009349{
9350 int rc = 0;
9351 unsigned long host_lock_flags = 0;
9352
9353 ENTER;
9354 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9355 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
Brian King6270e592014-01-21 12:16:41 -06009356 ioa_cfg->probe_done = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009357 if (ioa_cfg->needs_hard_reset) {
9358 ioa_cfg->needs_hard_reset = 0;
9359 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9360 } else
9361 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9362 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009363 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009364
9365 LEAVE;
9366 return rc;
9367}
9368
9369/**
9370 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9371 * @ioa_cfg: ioa config struct
9372 *
9373 * Return value:
9374 * none
9375 **/
9376static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9377{
9378 int i;
9379
Brian Kinga65e8f12015-03-26 11:23:55 -05009380 if (ioa_cfg->ipr_cmnd_list) {
9381 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9382 if (ioa_cfg->ipr_cmnd_list[i])
9383 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9384 ioa_cfg->ipr_cmnd_list[i],
9385 ioa_cfg->ipr_cmnd_list_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009386
Brian Kinga65e8f12015-03-26 11:23:55 -05009387 ioa_cfg->ipr_cmnd_list[i] = NULL;
9388 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009389 }
9390
9391 if (ioa_cfg->ipr_cmd_pool)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009392 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009393
Brian King89aad422012-03-14 21:20:10 -05009394 kfree(ioa_cfg->ipr_cmnd_list);
9395 kfree(ioa_cfg->ipr_cmnd_list_dma);
9396 ioa_cfg->ipr_cmnd_list = NULL;
9397 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009398 ioa_cfg->ipr_cmd_pool = NULL;
9399}
9400
9401/**
9402 * ipr_free_mem - Frees memory allocated for an adapter
9403 * @ioa_cfg: ioa cfg struct
9404 *
9405 * Return value:
9406 * nothing
9407 **/
9408static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9409{
9410 int i;
9411
9412 kfree(ioa_cfg->res_entries);
Anton Blanchardd73341b2014-10-30 17:27:08 -05009413 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9414 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009415 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009416
9417 for (i = 0; i < ioa_cfg->hrrq_num; i++)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009418 dma_free_coherent(&ioa_cfg->pdev->dev,
9419 sizeof(u32) * ioa_cfg->hrrq[i].size,
9420 ioa_cfg->hrrq[i].host_rrq,
9421 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009422
Anton Blanchardd73341b2014-10-30 17:27:08 -05009423 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9424 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009425
Brian Kingafc3f832016-08-24 12:56:51 -05009426 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009427 dma_free_coherent(&ioa_cfg->pdev->dev,
9428 sizeof(struct ipr_hostrcb),
9429 ioa_cfg->hostrcb[i],
9430 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009431 }
9432
9433 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009434 kfree(ioa_cfg->trace);
9435}
9436
9437/**
Brian King2796ca52015-03-26 11:23:52 -05009438 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9439 * @ioa_cfg: ipr cfg struct
9440 *
9441 * This function frees all allocated IRQs for the
9442 * specified adapter.
9443 *
9444 * Return value:
9445 * none
9446 **/
9447static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9448{
9449 struct pci_dev *pdev = ioa_cfg->pdev;
9450
9451 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9452 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9453 int i;
9454 for (i = 0; i < ioa_cfg->nvectors; i++)
9455 free_irq(ioa_cfg->vectors_info[i].vec,
9456 &ioa_cfg->hrrq[i]);
9457 } else
9458 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9459
9460 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9461 pci_disable_msi(pdev);
9462 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9463 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9464 pci_disable_msix(pdev);
9465 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9466 }
9467}
9468
9469/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009470 * ipr_free_all_resources - Free all allocated resources for an adapter.
9471 * @ipr_cmd: ipr command struct
9472 *
9473 * This function frees all allocated resources for the
9474 * specified adapter.
9475 *
9476 * Return value:
9477 * none
9478 **/
9479static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9480{
9481 struct pci_dev *pdev = ioa_cfg->pdev;
9482
9483 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05009484 ipr_free_irqs(ioa_cfg);
9485 if (ioa_cfg->reset_work_q)
9486 destroy_workqueue(ioa_cfg->reset_work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009487 iounmap(ioa_cfg->hdw_dma_regs);
9488 pci_release_regions(pdev);
9489 ipr_free_mem(ioa_cfg);
9490 scsi_host_put(ioa_cfg->host);
9491 pci_disable_device(pdev);
9492 LEAVE;
9493}
9494
9495/**
9496 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9497 * @ioa_cfg: ioa config struct
9498 *
9499 * Return value:
9500 * 0 on success / -ENOMEM on allocation failure
9501 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009502static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009503{
9504 struct ipr_cmnd *ipr_cmd;
9505 struct ipr_ioarcb *ioarcb;
9506 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009507 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009508
Anton Blanchardd73341b2014-10-30 17:27:08 -05009509 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009510 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009511
9512 if (!ioa_cfg->ipr_cmd_pool)
9513 return -ENOMEM;
9514
Brian King89aad422012-03-14 21:20:10 -05009515 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9516 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9517
9518 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9519 ipr_free_cmd_blks(ioa_cfg);
9520 return -ENOMEM;
9521 }
9522
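	/* Divide the command blocks among the HRRQs: with multiple queues,
	 * queue 0 gets IPR_NUM_INTERNAL_CMD_BLKS for adapter-internal use and
	 * the remaining blocks are split evenly across the other queues, with
	 * any leftovers added to the last queue below. */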
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009523 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9524 if (ioa_cfg->hrrq_num > 1) {
9525 if (i == 0) {
9526 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9527 ioa_cfg->hrrq[i].min_cmd_id = 0;
9528 ioa_cfg->hrrq[i].max_cmd_id =
9529 (entries_each_hrrq - 1);
9530 } else {
9531 entries_each_hrrq =
9532 IPR_NUM_BASE_CMD_BLKS/
9533 (ioa_cfg->hrrq_num - 1);
9534 ioa_cfg->hrrq[i].min_cmd_id =
9535 IPR_NUM_INTERNAL_CMD_BLKS +
9536 (i - 1) * entries_each_hrrq;
9537 ioa_cfg->hrrq[i].max_cmd_id =
9538 (IPR_NUM_INTERNAL_CMD_BLKS +
9539 i * entries_each_hrrq - 1);
9540 }
9541 } else {
9542 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9543 ioa_cfg->hrrq[i].min_cmd_id = 0;
9544 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9545 }
9546 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9547 }
9548
9549 BUG_ON(ioa_cfg->hrrq_num == 0);
9550
9551 i = IPR_NUM_CMD_BLKS -
9552 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9553 if (i > 0) {
9554 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9555 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9556 }
9557
Linus Torvalds1da177e2005-04-16 15:20:36 -07009558 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009559 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009560
9561 if (!ipr_cmd) {
9562 ipr_free_cmd_blks(ioa_cfg);
9563 return -ENOMEM;
9564 }
9565
9566 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9567 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9568 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9569
9570 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08009571 ipr_cmd->dma_addr = dma_addr;
9572 if (ioa_cfg->sis64)
9573 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9574 else
9575 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9576
Linus Torvalds1da177e2005-04-16 15:20:36 -07009577 ioarcb->host_response_handle = cpu_to_be32(i << 2);
Wayne Boyera32c0552010-02-19 13:23:36 -08009578 if (ioa_cfg->sis64) {
9579 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9580 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9581 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009582 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08009583 } else {
9584 ioarcb->write_ioadl_addr =
9585 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9586 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9587 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009588 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08009589 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009590 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9591 ipr_cmd->cmd_index = i;
9592 ipr_cmd->ioa_cfg = ioa_cfg;
9593 ipr_cmd->sense_buffer_dma = dma_addr +
9594 offsetof(struct ipr_cmnd, sense_buffer);
9595
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009596 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9597 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9598 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9599 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9600 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009601 }
9602
9603 return 0;
9604}
9605
9606/**
9607 * ipr_alloc_mem - Allocate memory for an adapter
9608 * @ioa_cfg: ioa config struct
9609 *
9610 * Return value:
9611 * 0 on success / non-zero for error
9612 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009613static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009614{
9615 struct pci_dev *pdev = ioa_cfg->pdev;
9616 int i, rc = -ENOMEM;
9617
9618 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009619 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009620 ioa_cfg->max_devs_supported, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009621
9622 if (!ioa_cfg->res_entries)
9623 goto out;
9624
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009625 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009626 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009627 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9628 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009629
Anton Blanchardd73341b2014-10-30 17:27:08 -05009630 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9631 sizeof(struct ipr_misc_cbs),
9632 &ioa_cfg->vpd_cbs_dma,
9633 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009634
9635 if (!ioa_cfg->vpd_cbs)
9636 goto out_free_res_entries;
9637
9638 if (ipr_alloc_cmd_blks(ioa_cfg))
9639 goto out_free_vpd_cbs;
9640
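	/*
	 * Allocate one DMA-coherent host response ring per HRRQ; each ring
	 * entry is a 32-bit response descriptor written by the adapter.
	 */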
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009641 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009642 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009643 sizeof(u32) * ioa_cfg->hrrq[i].size,
Anton Blanchardd73341b2014-10-30 17:27:08 -05009644 &ioa_cfg->hrrq[i].host_rrq_dma,
9645 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009646
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009647 if (!ioa_cfg->hrrq[i].host_rrq) {
 9648 			while (--i >= 0)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009649 dma_free_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009650 sizeof(u32) * ioa_cfg->hrrq[i].size,
9651 ioa_cfg->hrrq[i].host_rrq,
9652 ioa_cfg->hrrq[i].host_rrq_dma);
9653 goto out_ipr_free_cmd_blocks;
9654 }
9655 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9656 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009657
Anton Blanchardd73341b2014-10-30 17:27:08 -05009658 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9659 ioa_cfg->cfg_table_size,
9660 &ioa_cfg->cfg_table_dma,
9661 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009662
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009663 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009664 goto out_free_host_rrq;
9665
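	/*
	 * Allocate the DMA-able host request/response control blocks used
	 * for HCAMs (asynchronous error log and configuration change
	 * notifications) and queue them on the free list.
	 */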
Brian Kingafc3f832016-08-24 12:56:51 -05009666 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009667 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9668 sizeof(struct ipr_hostrcb),
9669 &ioa_cfg->hostrcb_dma[i],
9670 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009671
9672 if (!ioa_cfg->hostrcb[i])
9673 goto out_free_hostrcb_dma;
9674
9675 ioa_cfg->hostrcb[i]->hostrcb_dma =
9676 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06009677 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009678 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9679 }
9680
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009681 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07009682 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9683
9684 if (!ioa_cfg->trace)
9685 goto out_free_hostrcb_dma;
9686
Linus Torvalds1da177e2005-04-16 15:20:36 -07009687 rc = 0;
9688out:
9689 LEAVE;
9690 return rc;
9691
9692out_free_hostrcb_dma:
9693 while (i-- > 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009694 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9695 ioa_cfg->hostrcb[i],
9696 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009697 }
Anton Blanchardd73341b2014-10-30 17:27:08 -05009698 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9699 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009700out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009701 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009702 dma_free_coherent(&pdev->dev,
9703 sizeof(u32) * ioa_cfg->hrrq[i].size,
9704 ioa_cfg->hrrq[i].host_rrq,
9705 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009706 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009707out_ipr_free_cmd_blocks:
9708 ipr_free_cmd_blks(ioa_cfg);
9709out_free_vpd_cbs:
Anton Blanchardd73341b2014-10-30 17:27:08 -05009710 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9711 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009712out_free_res_entries:
9713 kfree(ioa_cfg->res_entries);
9714 goto out;
9715}
9716
9717/**
9718 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9719 * @ioa_cfg: ioa config struct
9720 *
9721 * Return value:
9722 * none
9723 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009724static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009725{
9726 int i;
9727
9728 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9729 ioa_cfg->bus_attr[i].bus = i;
9730 ioa_cfg->bus_attr[i].qas_enabled = 0;
9731 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9732 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9733 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9734 else
9735 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9736 }
9737}
9738
9739/**
Brian King6270e592014-01-21 12:16:41 -06009740 * ipr_init_regs - Initialize IOA registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07009741 * @ioa_cfg: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07009742 *
9743 * Return value:
Brian King6270e592014-01-21 12:16:41 -06009744 * none
Linus Torvalds1da177e2005-04-16 15:20:36 -07009745 **/
Brian King6270e592014-01-21 12:16:41 -06009746static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009747{
9748 const struct ipr_interrupt_offsets *p;
9749 struct ipr_interrupts *t;
9750 void __iomem *base;
9751
Linus Torvalds1da177e2005-04-16 15:20:36 -07009752 p = &ioa_cfg->chip_cfg->regs;
9753 t = &ioa_cfg->regs;
9754 base = ioa_cfg->hdw_dma_regs;
9755
9756 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9757 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009758 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009759 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009760 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009761 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009762 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009763 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009764 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009765 t->ioarrin_reg = base + p->ioarrin_reg;
9766 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009767 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009768 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009769 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009770 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009771 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009772
9773 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009774 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009775 t->dump_addr_reg = base + p->dump_addr_reg;
9776 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009777 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009778 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009779}
9780
9781/**
Brian King6270e592014-01-21 12:16:41 -06009782 * ipr_init_ioa_cfg - Initialize IOA config struct
9783 * @ioa_cfg: ioa config struct
9784 * @host: scsi host struct
9785 * @pdev: PCI dev struct
9786 *
9787 * Return value:
9788 * none
9789 **/
9790static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9791 struct Scsi_Host *host, struct pci_dev *pdev)
9792{
9793 int i;
9794
9795 ioa_cfg->host = host;
9796 ioa_cfg->pdev = pdev;
9797 ioa_cfg->log_level = ipr_log_level;
9798 ioa_cfg->doorbell = IPR_DOORBELL;
9799 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9800 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9801 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9802 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9803 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9804 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9805
9806 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9807 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
Brian Kingafc3f832016-08-24 12:56:51 -05009808 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
Brian King6270e592014-01-21 12:16:41 -06009809 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9810 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9811 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9812 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9813 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9814 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9815 ioa_cfg->sdt_state = INACTIVE;
9816
9817 ipr_initialize_bus_attr(ioa_cfg);
9818 ioa_cfg->max_devs_supported = ipr_max_devs;
9819
9820 if (ioa_cfg->sis64) {
9821 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9822 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9823 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9824 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9825 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9826 + ((sizeof(struct ipr_config_table_entry64)
9827 * ioa_cfg->max_devs_supported)));
9828 } else {
9829 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9830 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9831 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9832 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9833 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9834 + ((sizeof(struct ipr_config_table_entry)
9835 * ioa_cfg->max_devs_supported)));
9836 }
9837
Brian Kingf688f962014-12-02 12:47:37 -06009838 host->max_channel = IPR_VSET_BUS;
Brian King6270e592014-01-21 12:16:41 -06009839 host->unique_id = host->host_no;
9840 host->max_cmd_len = IPR_MAX_CDB_LEN;
9841 host->can_queue = ioa_cfg->max_cmds;
9842 pci_set_drvdata(pdev, ioa_cfg);
9843
9844 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9845 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9846 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9847 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9848 if (i == 0)
9849 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9850 else
9851 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9852 }
9853}
9854
9855/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009856 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009857 * @dev_id: PCI device id struct
9858 *
9859 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009860 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009861 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009862static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009863ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009864{
9865 int i;
9866
Linus Torvalds1da177e2005-04-16 15:20:36 -07009867 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9868 if (ipr_chip[i].vendor == dev_id->vendor &&
9869 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009870 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07009871 return NULL;
9872}
9873
Brian King6270e592014-01-21 12:16:41 -06009874/**
9875 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9876 * during probe time
9877 * @ioa_cfg: ioa config struct
9878 *
9879 * Return value:
9880 * None
9881 **/
9882static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9883{
9884 struct pci_dev *pdev = ioa_cfg->pdev;
9885
9886 if (pci_channel_offline(pdev)) {
9887 wait_event_timeout(ioa_cfg->eeh_wait_q,
9888 !pci_channel_offline(pdev),
9889 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9890 pci_restore_state(pdev);
9891 }
9892}
9893
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009894static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9895{
9896 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009897 int i, vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009898
9899 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9900 entries[i].entry = i;
9901
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009902 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9903 entries, 1, ipr_number_of_msix);
9904 if (vectors < 0) {
Brian King6270e592014-01-21 12:16:41 -06009905 ipr_wait_for_pci_err_recovery(ioa_cfg);
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009906 return vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009907 }
9908
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009909 for (i = 0; i < vectors; i++)
9910 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9911 ioa_cfg->nvectors = vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009912
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009913 return 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009914}
9915
9916static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9917{
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009918 int i, vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009919
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009920 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9921 if (vectors < 0) {
Brian King6270e592014-01-21 12:16:41 -06009922 ipr_wait_for_pci_err_recovery(ioa_cfg);
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009923 return vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009924 }
9925
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009926 for (i = 0; i < vectors; i++)
9927 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9928 ioa_cfg->nvectors = vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009929
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009930 return 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009931}
9932
9933static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9934{
9935 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9936
9937 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9938 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9939 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9940 ioa_cfg->vectors_info[vec_idx].
9941 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9942 }
9943}
9944
9945static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9946{
9947 int i, rc;
9948
9949 for (i = 1; i < ioa_cfg->nvectors; i++) {
9950 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9951 ipr_isr_mhrrq,
9952 0,
9953 ioa_cfg->vectors_info[i].desc,
9954 &ioa_cfg->hrrq[i]);
9955 if (rc) {
9956 while (--i >= 0)
9957 free_irq(ioa_cfg->vectors_info[i].vec,
9958 &ioa_cfg->hrrq[i]);
9959 return rc;
9960 }
9961 }
9962 return 0;
9963}
9964
Linus Torvalds1da177e2005-04-16 15:20:36 -07009965/**
Wayne Boyer95fecd92009-06-16 15:13:28 -07009966 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 9967 * @irq: interrupt number
 * @devp: pointer to the ioa config struct
9968 *
9969 * Description: Simply set the msi_received flag to 1 indicating that
9970 * Message Signaled Interrupts are supported.
9971 *
9972 * Return value:
 9973 * 	IRQ_HANDLED
9974 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009975static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -07009976{
9977 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9978 unsigned long lock_flags = 0;
9979 irqreturn_t rc = IRQ_HANDLED;
9980
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009981 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009982 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9983
9984 ioa_cfg->msi_received = 1;
9985 wake_up(&ioa_cfg->msi_wait_q);
9986
9987 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9988 return rc;
9989}
9990
9991/**
9992 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 9993 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
9994 *
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009995 * Description: The return value from pci_enable_msi_range() cannot always be
Wayne Boyer95fecd92009-06-16 15:13:28 -07009996 * trusted. This routine sets up and initiates a test interrupt to determine
9997 * if the interrupt is received via the ipr_test_intr() service routine.
 9998 * If the test fails, the driver will fall back to LSI.
9999 *
10000 * Return value:
10001 * 0 on success / non-zero on failure
10002 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010003static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010004{
10005 int rc;
10006 volatile u32 int_reg;
10007 unsigned long lock_flags = 0;
10008
10009 ENTER;
10010
10011 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10012 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10013 ioa_cfg->msi_received = 0;
10014 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
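	/*
	 * Mask everything except the IO debug acknowledge interrupt, which is
	 * triggered further down to verify that an MSI/MSI-X interrupt is
	 * actually delivered to ipr_test_intr().
	 */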
Wayne Boyer214777b2010-02-19 13:24:26 -080010015 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010016 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10017 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10018
wenxiong@linux.vnet.ibm.comf19799f2013-02-27 12:37:45 -060010019 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10020 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10021 else
10022 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010023 if (rc) {
10024 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
10025 return rc;
10026 } else if (ipr_debug)
10027 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
10028
Wayne Boyer214777b2010-02-19 13:24:26 -080010029 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010030 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10031 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010032 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010033 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10034
Wayne Boyer95fecd92009-06-16 15:13:28 -070010035 if (!ioa_cfg->msi_received) {
10036 /* MSI test failed */
10037 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10038 rc = -EOPNOTSUPP;
10039 } else if (ipr_debug)
10040 dev_info(&pdev->dev, "MSI test succeeded.\n");
10041
10042 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10043
wenxiong@linux.vnet.ibm.comf19799f2013-02-27 12:37:45 -060010044 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10045 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
10046 else
10047 free_irq(pdev->irq, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010048
10049 LEAVE;
10050
10051 return rc;
10052}
10053
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010054/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -070010055 * @pdev: PCI device struct
10056 * @dev_id: PCI device id struct
10057 *
10058 * Return value:
10059 * 0 on success / non-zero on failure
10060 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010061static int ipr_probe_ioa(struct pci_dev *pdev,
10062 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010063{
10064 struct ipr_ioa_cfg *ioa_cfg;
10065 struct Scsi_Host *host;
10066 unsigned long ipr_regs_pci;
10067 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -070010068 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -050010069 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010070 unsigned long lock_flags, driver_lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010071
10072 ENTER;
10073
Linus Torvalds1da177e2005-04-16 15:20:36 -070010074 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010075 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10076
10077 if (!host) {
10078 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10079 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010080 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010081 }
10082
10083 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10084 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d132012-07-09 21:06:08 -070010085 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010086
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010087 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010088
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010089 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010090 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10091 dev_id->vendor, dev_id->device);
10092 goto out_scsi_host_put;
10093 }
10094
Wayne Boyera32c0552010-02-19 13:23:36 -080010095 /* set SIS 32 or SIS 64 */
10096 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010097 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -050010098 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -050010099 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010100
Brian King5469cb52007-03-29 12:42:40 -050010101 if (ipr_transop_timeout)
10102 ioa_cfg->transop_timeout = ipr_transop_timeout;
10103 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10104 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10105 else
10106 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10107
Auke Kok44c10132007-06-08 15:46:36 -070010108 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -050010109
Brian King6270e592014-01-21 12:16:41 -060010110 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10111
Linus Torvalds1da177e2005-04-16 15:20:36 -070010112 ipr_regs_pci = pci_resource_start(pdev, 0);
10113
10114 rc = pci_request_regions(pdev, IPR_NAME);
10115 if (rc < 0) {
10116 dev_err(&pdev->dev,
10117 "Couldn't register memory range of registers\n");
10118 goto out_scsi_host_put;
10119 }
10120
Brian King6270e592014-01-21 12:16:41 -060010121 rc = pci_enable_device(pdev);
10122
10123 if (rc || pci_channel_offline(pdev)) {
10124 if (pci_channel_offline(pdev)) {
10125 ipr_wait_for_pci_err_recovery(ioa_cfg);
10126 rc = pci_enable_device(pdev);
10127 }
10128
10129 if (rc) {
10130 dev_err(&pdev->dev, "Cannot enable adapter\n");
10131 ipr_wait_for_pci_err_recovery(ioa_cfg);
10132 goto out_release_regions;
10133 }
10134 }
10135
Arjan van de Ven25729a72008-09-28 16:18:02 -070010136 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010137
10138 if (!ipr_regs) {
10139 dev_err(&pdev->dev,
10140 "Couldn't map memory range of registers\n");
10141 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010142 goto out_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010143 }
10144
10145 ioa_cfg->hdw_dma_regs = ipr_regs;
10146 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10147 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10148
Brian King6270e592014-01-21 12:16:41 -060010149 ipr_init_regs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010150
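	/* SIS-64 adapters prefer a 64 bit DMA mask but can fall back to 32 bits. */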
Wayne Boyera32c0552010-02-19 13:23:36 -080010151 if (ioa_cfg->sis64) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010152 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Wayne Boyera32c0552010-02-19 13:23:36 -080010153 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010154 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10155 rc = dma_set_mask_and_coherent(&pdev->dev,
10156 DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010157 }
Wayne Boyera32c0552010-02-19 13:23:36 -080010158 } else
Anton Blanchard869404c2014-10-30 17:27:09 -050010159 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010160
Linus Torvalds1da177e2005-04-16 15:20:36 -070010161 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010162 dev_err(&pdev->dev, "Failed to set DMA mask\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070010163 goto cleanup_nomem;
10164 }
10165
10166 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10167 ioa_cfg->chip_cfg->cache_line_size);
10168
10169 if (rc != PCIBIOS_SUCCESSFUL) {
10170 dev_err(&pdev->dev, "Write of cache line size failed\n");
Brian King6270e592014-01-21 12:16:41 -060010171 ipr_wait_for_pci_err_recovery(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010172 rc = -EIO;
10173 goto cleanup_nomem;
10174 }
10175
Brian King6270e592014-01-21 12:16:41 -060010176 /* Issue MMIO read to ensure card is not in EEH */
10177 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10178 ipr_wait_for_pci_err_recovery(ioa_cfg);
10179
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010180 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10181 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10182 IPR_MAX_MSIX_VECTORS);
10183 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10184 }
10185
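	/* Prefer MSI-X, then MSI, and finally fall back to legacy (LSI) interrupts. */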
10186 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010187 ipr_enable_msix(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010188 ioa_cfg->intr_flag = IPR_USE_MSIX;
10189 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010190 ipr_enable_msi(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010191 ioa_cfg->intr_flag = IPR_USE_MSI;
10192 else {
10193 ioa_cfg->intr_flag = IPR_USE_LSI;
Brian King54e430b2016-06-27 09:09:40 -050010194 ioa_cfg->clear_isr = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010195 ioa_cfg->nvectors = 1;
10196 dev_info(&pdev->dev, "Cannot enable MSI.\n");
10197 }
10198
Brian King6270e592014-01-21 12:16:41 -060010199 pci_set_master(pdev);
10200
10201 if (pci_channel_offline(pdev)) {
10202 ipr_wait_for_pci_err_recovery(ioa_cfg);
10203 pci_set_master(pdev);
10204 if (pci_channel_offline(pdev)) {
10205 rc = -EIO;
10206 goto out_msi_disable;
10207 }
10208 }
10209
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010210 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
10211 ioa_cfg->intr_flag == IPR_USE_MSIX) {
Wayne Boyer95fecd92009-06-16 15:13:28 -070010212 rc = ipr_test_msi(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010213 if (rc == -EOPNOTSUPP) {
Brian King6270e592014-01-21 12:16:41 -060010214 ipr_wait_for_pci_err_recovery(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010215 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
10216 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
10217 pci_disable_msi(pdev);
10218 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
10219 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
10220 pci_disable_msix(pdev);
10221 }
10222
10223 ioa_cfg->intr_flag = IPR_USE_LSI;
10224 ioa_cfg->nvectors = 1;
10225 }
Wayne Boyer95fecd92009-06-16 15:13:28 -070010226 else if (rc)
10227 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010228 else {
10229 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10230 dev_info(&pdev->dev,
10231 "Request for %d MSIs succeeded with starting IRQ: %d\n",
10232 ioa_cfg->nvectors, pdev->irq);
10233 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10234 dev_info(&pdev->dev,
10235 "Request for %d MSIXs succeeded.",
10236 ioa_cfg->nvectors);
10237 }
10238 }
10239
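	/*
	 * Use one HRRQ per interrupt vector, bounded by the number of online
	 * CPUs and the driver maximum.
	 */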
10240 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10241 (unsigned int)num_online_cpus(),
10242 (unsigned int)IPR_MAX_HRRQ_NUM);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010243
Linus Torvalds1da177e2005-04-16 15:20:36 -070010244 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010245 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010246
10247 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010248 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010249
10250 rc = ipr_alloc_mem(ioa_cfg);
10251 if (rc < 0) {
10252 dev_err(&pdev->dev,
10253 "Couldn't allocate enough memory for device driver!\n");
Julia Lawallf170c682011-07-11 14:08:25 -070010254 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010255 }
10256
Brian King6270e592014-01-21 12:16:41 -060010257 /* Save away PCI config space for use following IOA reset */
10258 rc = pci_save_state(pdev);
10259
10260 if (rc != PCIBIOS_SUCCESSFUL) {
10261 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10262 rc = -EIO;
10263 goto cleanup_nolog;
10264 }
10265
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010266 /*
10267 * If HRRQ updated interrupt is not masked, or reset alert is set,
10268 * the card is in an unknown state and needs a hard reset
10269 */
Wayne Boyer214777b2010-02-19 13:24:26 -080010270 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10271 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10272 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010273 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10274 ioa_cfg->needs_hard_reset = 1;
Anton Blanchard5d7c20b2011-08-01 19:43:45 +100010275 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
Brian King473b1e82007-05-02 10:44:11 -050010276 ioa_cfg->needs_hard_reset = 1;
10277 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10278 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010279
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010280 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010281 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010282 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010283
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010284 if (ioa_cfg->intr_flag == IPR_USE_MSI
10285 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
10286 name_msi_vectors(ioa_cfg);
10287 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
10288 0,
10289 ioa_cfg->vectors_info[0].desc,
10290 &ioa_cfg->hrrq[0]);
10291 if (!rc)
10292 rc = ipr_request_other_msi_irqs(ioa_cfg);
10293 } else {
10294 rc = request_irq(pdev->irq, ipr_isr,
10295 IRQF_SHARED,
10296 IPR_NAME, &ioa_cfg->hrrq[0]);
10297 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010298 if (rc) {
10299 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10300 pdev->irq, rc);
10301 goto cleanup_nolog;
10302 }
10303
Brian King463fc692007-05-07 17:09:05 -050010304 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10305 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10306 ioa_cfg->needs_warm_reset = 1;
10307 ioa_cfg->reset = ipr_reset_slot_reset;
Brian King2796ca52015-03-26 11:23:52 -050010308
10309 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10310 WQ_MEM_RECLAIM, host->host_no);
10311
10312 if (!ioa_cfg->reset_work_q) {
10313 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
Wei Yongjunc8e18ac2016-07-29 16:00:45 +000010314 rc = -ENOMEM;
Brian King2796ca52015-03-26 11:23:52 -050010315 goto out_free_irq;
10316 }
Brian King463fc692007-05-07 17:09:05 -050010317 } else
10318 ioa_cfg->reset = ipr_reset_start_bist;
10319
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010320 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010321 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010322 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010323
10324 LEAVE;
10325out:
10326 return rc;
10327
Brian King2796ca52015-03-26 11:23:52 -050010328out_free_irq:
10329 ipr_free_irqs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010330cleanup_nolog:
10331 ipr_free_mem(ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010332out_msi_disable:
Brian King6270e592014-01-21 12:16:41 -060010333 ipr_wait_for_pci_err_recovery(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010334 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10335 pci_disable_msi(pdev);
10336 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10337 pci_disable_msix(pdev);
Julia Lawallf170c682011-07-11 14:08:25 -070010338cleanup_nomem:
10339 iounmap(ipr_regs);
Brian King6270e592014-01-21 12:16:41 -060010340out_disable:
10341 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010342out_release_regions:
10343 pci_release_regions(pdev);
10344out_scsi_host_put:
10345 scsi_host_put(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010346 goto out;
10347}
10348
10349/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010350 * ipr_initiate_ioa_bringdown - Bring down an adapter
10351 * @ioa_cfg: ioa config struct
10352 * @shutdown_type: shutdown type
10353 *
10354 * Description: This function will initiate bringing down the adapter.
10355 * This consists of issuing an IOA shutdown to the adapter
10356 * to flush the cache, and running BIST.
10357 * If the caller needs to wait on the completion of the reset,
10358 * the caller must sleep on the reset_wait_q.
10359 *
10360 * Return value:
10361 * none
10362 **/
10363static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10364 enum ipr_shutdown_type shutdown_type)
10365{
10366 ENTER;
10367 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10368 ioa_cfg->sdt_state = ABORT_DUMP;
10369 ioa_cfg->reset_retries = 0;
10370 ioa_cfg->in_ioa_bringdown = 1;
10371 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10372 LEAVE;
10373}
10374
10375/**
10376 * __ipr_remove - Remove a single adapter
10377 * @pdev: pci device struct
10378 *
10379 * Adapter hot plug remove entry point.
10380 *
10381 * Return value:
10382 * none
10383 **/
10384static void __ipr_remove(struct pci_dev *pdev)
10385{
10386 unsigned long host_lock_flags = 0;
10387 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Brian Kingbfae7822013-01-30 23:45:08 -060010388 int i;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010389 unsigned long driver_lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010390 ENTER;
10391
10392 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010393 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010394 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10395 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10396 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10397 }
10398
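	/*
	 * Mark every HRRQ as being removed so no new commands are started
	 * while the adapter is brought down.
	 */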
Brian Kingbfae7822013-01-30 23:45:08 -060010399 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10400 spin_lock(&ioa_cfg->hrrq[i]._lock);
10401 ioa_cfg->hrrq[i].removing_ioa = 1;
10402 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10403 }
10404 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -070010405 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10406
10407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10408 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Tejun Heo43829732012-08-20 14:51:24 -070010409 flush_work(&ioa_cfg->work_q);
Brian King2796ca52015-03-26 11:23:52 -050010410 if (ioa_cfg->reset_work_q)
10411 flush_workqueue(ioa_cfg->reset_work_q);
wenxiong@linux.vnet.ibm.com9077a942013-03-14 13:52:24 -050010412 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010413 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10414
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010415 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010416 list_del(&ioa_cfg->queue);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010417 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010418
10419 if (ioa_cfg->sdt_state == ABORT_DUMP)
10420 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10422
10423 ipr_free_all_resources(ioa_cfg);
10424
10425 LEAVE;
10426}
10427
10428/**
10429 * ipr_remove - IOA hot plug remove entry point
10430 * @pdev: pci device struct
10431 *
10432 * Adapter hot plug remove entry point.
10433 *
10434 * Return value:
10435 * none
10436 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010437static void ipr_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010438{
10439 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10440
10441 ENTER;
10442
Tony Jonesee959b02008-02-22 00:13:36 +010010443 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010444 &ipr_trace_attr);
Tony Jonesee959b02008-02-22 00:13:36 +010010445 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010446 &ipr_dump_attr);
Brian Kingafc3f832016-08-24 12:56:51 -050010447 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10448 &ipr_ioa_async_err_log);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010449 scsi_remove_host(ioa_cfg->host);
10450
10451 __ipr_remove(pdev);
10452
10453 LEAVE;
10454}
10455
10456/**
10457 * ipr_probe - Adapter hot plug add entry point
 10458 * @pdev: PCI device struct
 * @dev_id: PCI device ID struct
 *
10459 * Return value:
10460 * 0 on success / non-zero on failure
10461 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010462static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010463{
10464 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingb195d5e2016-07-15 14:48:03 -050010465 unsigned long flags;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010466 int rc, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010467
10468 rc = ipr_probe_ioa(pdev, dev_id);
10469
10470 if (rc)
10471 return rc;
10472
10473 ioa_cfg = pci_get_drvdata(pdev);
10474 rc = ipr_probe_ioa_part2(ioa_cfg);
10475
10476 if (rc) {
10477 __ipr_remove(pdev);
10478 return rc;
10479 }
10480
10481 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10482
10483 if (rc) {
10484 __ipr_remove(pdev);
10485 return rc;
10486 }
10487
Tony Jonesee959b02008-02-22 00:13:36 +010010488 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010489 &ipr_trace_attr);
10490
10491 if (rc) {
10492 scsi_remove_host(ioa_cfg->host);
10493 __ipr_remove(pdev);
10494 return rc;
10495 }
10496
Brian Kingafc3f832016-08-24 12:56:51 -050010497 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10498 &ipr_ioa_async_err_log);
10499
10500 if (rc) {
10501 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10502 &ipr_dump_attr);
10503 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10504 &ipr_trace_attr);
10505 scsi_remove_host(ioa_cfg->host);
10506 __ipr_remove(pdev);
10507 return rc;
10508 }
10509
Tony Jonesee959b02008-02-22 00:13:36 +010010510 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010511 &ipr_dump_attr);
10512
10513 if (rc) {
Brian Kingafc3f832016-08-24 12:56:51 -050010514 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10515 &ipr_ioa_async_err_log);
Tony Jonesee959b02008-02-22 00:13:36 +010010516 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010517 &ipr_trace_attr);
10518 scsi_remove_host(ioa_cfg->host);
10519 __ipr_remove(pdev);
10520 return rc;
10521 }
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010522 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10523 ioa_cfg->scan_enabled = 1;
10524 schedule_work(&ioa_cfg->work_q);
10525 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010526
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010527 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10528
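	/*
	 * With multiple HRRQs on SIS-64 adapters, completions on the
	 * secondary queues are processed via irq_poll.
	 */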
Jens Axboe89f8b332014-03-13 09:38:42 -060010529 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010530 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010531 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010532 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010533 }
10534 }
10535
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010536 scsi_scan_host(ioa_cfg->host);
10537
Linus Torvalds1da177e2005-04-16 15:20:36 -070010538 return 0;
10539}
10540
10541/**
10542 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010543 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -070010544 *
10545 * This function is invoked upon system shutdown/reboot. It will issue
10546 * an adapter shutdown to the adapter to flush the write cache.
10547 *
10548 * Return value:
10549 * none
10550 **/
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010551static void ipr_shutdown(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010552{
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010553 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010554 unsigned long lock_flags = 0;
Brian King4fdd7c72015-03-26 11:23:50 -050010555 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010556 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010557
10558 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Jens Axboe89f8b332014-03-13 09:38:42 -060010559 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010560 ioa_cfg->iopoll_weight = 0;
10561 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010562 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010563 }
10564
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010565 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010566 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10567 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10568 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10569 }
10570
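	/*
	 * On a fast reboot of a SIS-64 adapter, request a quiesce shutdown
	 * instead of the default normal shutdown.
	 */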
Brian King4fdd7c72015-03-26 11:23:50 -050010571 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10572 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10573
10574 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10576 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Brian King4fdd7c72015-03-26 11:23:50 -050010577 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
Brian King2796ca52015-03-26 11:23:52 -050010578 ipr_free_irqs(ioa_cfg);
Brian King4fdd7c72015-03-26 11:23:50 -050010579 pci_disable_device(ioa_cfg->pdev);
10580 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010581}
10582
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010583static struct pci_device_id ipr_pci_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010584 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010585 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010586 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010587 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010588 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010589 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010590 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010591 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010592 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010593 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010594 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010595 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010596 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010597 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010598 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King5469cb52007-03-29 12:42:40 -050010599 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10600 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010601 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010602 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010603 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010604 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10605 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010606 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010607 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10608 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010609 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010610 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010611 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010612 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10613 IPR_USE_LONG_TRANSOP_TIMEOUT},
Brian King60e74862006-11-21 10:28:10 -060010614 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010615 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10616 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010617 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King22d2e402007-04-26 16:00:13 -050010618 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10619 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King185eb312007-03-29 12:42:53 -050010620 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King185eb312007-03-29 12:42:53 -050010621 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10622 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Wayne Boyerb0f56d32010-06-24 13:34:14 -070010623 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10624 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King5469cb52007-03-29 12:42:40 -050010625 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
Brian King463fc692007-05-07 17:09:05 -050010626 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010627 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
Brian King6d84c942007-01-23 11:25:23 -060010628 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010629 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King6d84c942007-01-23 11:25:23 -060010630 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010631 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010632 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10633 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010634 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010635 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10636 IPR_USE_LONG_TRANSOP_TIMEOUT },
Wayne Boyerd7b46272010-02-19 13:24:38 -080010637 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10638 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10639 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10640 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10641 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10642 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
Wayne Boyer32622bd2010-10-18 20:24:34 -070010643 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -060010644 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10645 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer5a918352011-10-27 11:58:21 -070010646 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10647 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer32622bd2010-10-18 20:24:34 -070010648 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010649 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010650 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010651 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010652 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010653 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010654 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010655 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10656 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10657 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010658 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -060010659 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10660 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10661 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10662 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10663 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10664 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10665 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10666 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
wenxiong@linux.vnet.ibm.com43c5fda2013-07-10 10:46:27 -050010667 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10668 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10669 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wendy Xiongf94d9962014-01-21 12:16:40 -060010670 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10671 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
wenxiong@linux.vnet.ibm.com43c5fda2013-07-10 10:46:27 -050010672 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10673 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10674 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10675 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10676 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10677 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10678 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10679 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10680 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10681 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10682 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
Wendy Xiong5eeac3e2014-03-12 16:08:52 -050010683 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10684 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10685 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10686 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10687 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10688 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
Wen Xiong00da9ff2016-07-12 16:02:07 -050010689 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10690 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10691 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10692 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010693 { }
10694};
10695MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10696
Stephen Hemmingera55b2d22012-09-07 09:33:16 -070010697static const struct pci_error_handlers ipr_err_handler = {
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010698 .error_detected = ipr_pci_error_detected,
Brian King6270e592014-01-21 12:16:41 -060010699 .mmio_enabled = ipr_pci_mmio_enabled,
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010700 .slot_reset = ipr_pci_slot_reset,
10701};
10702
Linus Torvalds1da177e2005-04-16 15:20:36 -070010703static struct pci_driver ipr_driver = {
10704 .name = IPR_NAME,
10705 .id_table = ipr_pci_table,
10706 .probe = ipr_probe,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010707 .remove = ipr_remove,
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010708 .shutdown = ipr_shutdown,
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010709 .err_handler = &ipr_err_handler,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010710};
10711
10712/**
Wayne Boyerf72919e2010-02-19 13:24:21 -080010713 * ipr_halt_done - Shutdown prepare completion
 10714 * @ipr_cmd: ipr command struct
 *
10715 * Return value:
10716 * none
10717 **/
10718static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10719{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010720 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010721}
10722
10723/**
10724 * ipr_halt - Issue shutdown prepare to all adapters
 10725 * @nb: notifier block
 * @event: system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf: notifier data (unused)
 *
10726 * Return value:
10727 * NOTIFY_OK on success / NOTIFY_DONE on failure
10728 **/
10729static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10730{
10731 struct ipr_cmnd *ipr_cmd;
10732 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010733 unsigned long flags = 0, driver_lock_flags;
Wayne Boyerf72919e2010-02-19 13:24:21 -080010734
10735 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10736 return NOTIFY_DONE;
10737
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010738 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010739
10740 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10741 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King4fdd7c72015-03-26 11:23:50 -050010742 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10743 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
Wayne Boyerf72919e2010-02-19 13:24:21 -080010744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10745 continue;
10746 }
10747
10748 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10749 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10750 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10751 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10752 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10753
10754 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10755 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10756 }
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010757 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010758
10759 return NOTIFY_OK;
10760}
10761
10762static struct notifier_block ipr_notifier = {
10763 ipr_halt, NULL, 0
10764};
10765
10766/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010767 * ipr_init - Module entry point
10768 *
10769 * Return value:
10770 * 0 on success / negative value on failure
10771 **/
10772static int __init ipr_init(void)
10773{
10774 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10775 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10776
Wayne Boyerf72919e2010-02-19 13:24:21 -080010777 register_reboot_notifier(&ipr_notifier);
Henrik Kretzschmardcbccbde2006-09-25 16:58:58 -070010778 return pci_register_driver(&ipr_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010779}
10780
10781/**
10782 * ipr_exit - Module unload
10783 *
10784 * Module unload entry point.
10785 *
10786 * Return value:
10787 * none
10788 **/
10789static void __exit ipr_exit(void)
10790{
Wayne Boyerf72919e2010-02-19 13:24:21 -080010791 unregister_reboot_notifier(&ipr_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010792 pci_unregister_driver(&ipr_driver);
10793}
10794
10795module_init(ipr_init);
10796module_exit(ipr_exit);