/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

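/*
 * Usage sketch (an editorial illustration, not part of the original source):
 * the module parameters declared above are set at load time, e.g.
 *
 *	modprobe ipr max_speed=2 number_of_msix=4 log_level=2
 *
 * or, for a built-in driver, on the kernel command line as ipr.max_speed=2.
 * The parameters registered with S_IRUGO | S_IWUSR (fastfail, debug) can
 * also be changed at runtime via /sys/module/ipr/parameters/.
 */
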
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

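/*
 * Worked example for the SIS64 path above (derived from the code, not from
 * hardware documentation): each struct ipr_ioadl64_desc is 16 bytes, so a
 * command with 20 descriptors carries 320 bytes of IOADL data.  That exceeds
 * the 128 bytes of inline IOADL space assumed for the default IOARCB, so bit
 * 2 of the address is set along with bit 0 and the adapter fetches the
 * larger 512 byte IOARCB instead of the default 256 byte one.
 */
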
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

1019/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1021 * @ipr_cmd: ipr command struct
1022 * @timeout_func: function to invoke if command times out
1023 * @timeout: timeout
1024 *
1025 * Return value:
1026 * none
1027 **/
1028static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1029 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1030 u32 timeout)
1031{
1032 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1033
1034 init_completion(&ipr_cmd->completion);
1035 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1036
1037 spin_unlock_irq(ioa_cfg->host->host_lock);
1038 wait_for_completion(&ipr_cmd->completion);
1039 spin_lock_irq(ioa_cfg->host->host_lock);
1040}
1041
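/**
 * ipr_get_hrrq_index - Select a host RRQ for the next command
 * @ioa_cfg:	ioa config struct
 *
 * When more than one HRRQ is configured, round-robin across queues
 * 1..N-1, leaving queue 0 (IPR_INIT_HRRQ) for internal commands;
 * otherwise always return queue 0.
 *
 * Return value:
 * 	host RRQ index
 **/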
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001042static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1043{
1044 if (ioa_cfg->hrrq_num == 1)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001045 return 0;
1046 else
1047 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001048}
1049
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050/**
1051 * ipr_send_hcam - Send an HCAM to the adapter.
1052 * @ioa_cfg: ioa config struct
1053 * @type: HCAM type
1054 * @hostrcb: hostrcb struct
1055 *
1056 * This function will send a Host Controlled Async command to the adapter.
1057 * If HCAMs are currently not allowed to be issued to the adapter, it will
1058 * place the hostrcb on the free queue.
1059 *
1060 * Return value:
1061 * none
1062 **/
1063static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1064 struct ipr_hostrcb *hostrcb)
1065{
1066 struct ipr_cmnd *ipr_cmd;
1067 struct ipr_ioarcb *ioarcb;
1068
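	/* Only issue the HCAM if the IOA is currently accepting commands; otherwise return the hostrcb to the free queue. */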
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001069 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001071 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1073
1074 ipr_cmd->u.hostrcb = hostrcb;
1075 ioarcb = &ipr_cmd->ioarcb;
1076
1077 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1078 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1079 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1080 ioarcb->cmd_pkt.cdb[1] = type;
1081 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1082 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1083
Wayne Boyera32c0552010-02-19 13:23:36 -08001084 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1085 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086
1087 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1088 ipr_cmd->done = ipr_process_ccn;
1089 else
1090 ipr_cmd->done = ipr_process_error;
1091
1092 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1093
Wayne Boyera32c0552010-02-19 13:23:36 -08001094 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 } else {
1096 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1097 }
1098}
1099
1100/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001101 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001103 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104 *
1105 * Return value:
1106 * none
1107 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001108static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001110 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001111 case IPR_PROTO_SATA:
1112 case IPR_PROTO_SAS_STP:
1113 res->ata_class = ATA_DEV_ATA;
1114 break;
1115 case IPR_PROTO_SATA_ATAPI:
1116 case IPR_PROTO_SAS_STP_ATAPI:
1117 res->ata_class = ATA_DEV_ATAPI;
1118 break;
1119 default:
1120 res->ata_class = ATA_DEV_UNKNOWN;
1121 break;
1122 	}
1123}
1124
1125/**
1126 * ipr_init_res_entry - Initialize a resource entry struct.
1127 * @res: resource entry struct
1128 * @cfgtew: config table entry wrapper struct
1129 *
1130 * Return value:
1131 * none
1132 **/
1133static void ipr_init_res_entry(struct ipr_resource_entry *res,
1134 struct ipr_config_table_entry_wrapper *cfgtew)
1135{
1136 int found = 0;
1137 unsigned int proto;
1138 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1139 struct ipr_resource_entry *gscsi_res = NULL;
1140
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001141 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 res->in_erp = 0;
1143 res->add_to_ml = 0;
1144 res->del_from_ml = 0;
1145 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06001146 res->reset_occurred = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001148 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001149
1150 if (ioa_cfg->sis64) {
1151 proto = cfgtew->u.cfgte64->proto;
1152 res->res_flags = cfgtew->u.cfgte64->res_flags;
1153 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001154 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001155
1156 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1157 sizeof(res->res_path));
1158
1159 res->bus = 0;
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001160 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1161 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001162 res->lun = scsilun_to_int(&res->dev_lun);
1163
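		/*
		 * SIS-64 does not report a SCSI bus/target, so devices are
		 * placed on virtual buses and target ids are handed out from
		 * per-type bitmaps; generic SCSI devices that share a dev_id
		 * reuse the same target.
		 */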
1164 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1165 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1166 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1167 found = 1;
1168 res->target = gscsi_res->target;
1169 break;
1170 }
1171 }
1172 if (!found) {
1173 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1174 ioa_cfg->max_devs_supported);
1175 set_bit(res->target, ioa_cfg->target_ids);
1176 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001177 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1178 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1179 res->target = 0;
1180 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1181 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1182 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1183 ioa_cfg->max_devs_supported);
1184 set_bit(res->target, ioa_cfg->array_ids);
1185 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1186 res->bus = IPR_VSET_VIRTUAL_BUS;
1187 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1188 ioa_cfg->max_devs_supported);
1189 set_bit(res->target, ioa_cfg->vset_ids);
1190 } else {
1191 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1192 ioa_cfg->max_devs_supported);
1193 set_bit(res->target, ioa_cfg->target_ids);
1194 }
1195 } else {
1196 proto = cfgtew->u.cfgte->proto;
1197 res->qmodel = IPR_QUEUEING_MODEL(res);
1198 res->flags = cfgtew->u.cfgte->flags;
1199 if (res->flags & IPR_IS_IOA_RESOURCE)
1200 res->type = IPR_RES_TYPE_IOAFP;
1201 else
1202 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1203
1204 res->bus = cfgtew->u.cfgte->res_addr.bus;
1205 res->target = cfgtew->u.cfgte->res_addr.target;
1206 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001207 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001208 }
1209
1210 ipr_update_ata_class(res, proto);
1211}
1212
1213/**
1214 * ipr_is_same_device - Determine if two devices are the same.
1215 * @res: resource entry struct
1216 * @cfgtew: config table entry wrapper struct
1217 *
1218 * Return value:
1219 * 1 if the devices are the same / 0 otherwise
1220 **/
1221static int ipr_is_same_device(struct ipr_resource_entry *res,
1222 struct ipr_config_table_entry_wrapper *cfgtew)
1223{
1224 if (res->ioa_cfg->sis64) {
1225 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1226 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001227 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001228 sizeof(cfgtew->u.cfgte64->lun))) {
1229 return 1;
1230 }
1231 } else {
1232 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1233 res->target == cfgtew->u.cfgte->res_addr.target &&
1234 res->lun == cfgtew->u.cfgte->res_addr.lun)
1235 return 1;
1236 }
1237
1238 return 0;
1239}
1240
1241/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001242 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001243 * @res_path: resource path
1244 * @buffer: output buffer
Brian Kingb3b3b402013-01-11 17:43:49 -06001245 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001246 *
1247 * Return value:
1248 * pointer to buffer
1249 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001250static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001251{
1252 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001253 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001254
Wayne Boyer46d74562010-08-11 07:15:17 -07001255 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001256 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1257 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1258 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001259
1260 return buffer;
1261}
1262
1263/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001264 * ipr_format_res_path - Format the resource path for printing.
1265 * @ioa_cfg: ioa config struct
1266 * @res_path: resource path
1267 * @buffer: output buffer
1268 * @len: length of buffer provided
1269 *
1270 * Return value:
1271 * pointer to buffer
1272 **/
1273static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1274 u8 *res_path, char *buffer, int len)
1275{
1276 char *p = buffer;
1277
1278 *p = '\0';
1279 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1280 	__ipr_format_res_path(res_path, p, len - (p - buffer));
1281 return buffer;
1282}
1283
1284/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001285 * ipr_update_res_entry - Update the resource entry.
1286 * @res: resource entry struct
1287 * @cfgtew: config table entry wrapper struct
1288 *
1289 * Return value:
1290 * none
1291 **/
1292static void ipr_update_res_entry(struct ipr_resource_entry *res,
1293 struct ipr_config_table_entry_wrapper *cfgtew)
1294{
1295 char buffer[IPR_MAX_RES_PATH_LENGTH];
1296 unsigned int proto;
1297 int new_path = 0;
1298
1299 if (res->ioa_cfg->sis64) {
1300 res->flags = cfgtew->u.cfgte64->flags;
1301 res->res_flags = cfgtew->u.cfgte64->res_flags;
Wayne Boyer75576bb2010-07-14 10:50:14 -07001302 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001303
1304 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1305 sizeof(struct ipr_std_inq_data));
1306
1307 res->qmodel = IPR_QUEUEING_MODEL64(res);
1308 proto = cfgtew->u.cfgte64->proto;
1309 res->res_handle = cfgtew->u.cfgte64->res_handle;
1310 res->dev_id = cfgtew->u.cfgte64->dev_id;
1311
1312 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1313 sizeof(res->dev_lun.scsi_lun));
1314
1315 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1316 sizeof(res->res_path))) {
1317 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1318 sizeof(res->res_path));
1319 new_path = 1;
1320 }
1321
1322 if (res->sdev && new_path)
1323 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001324 ipr_format_res_path(res->ioa_cfg,
1325 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001326 } else {
1327 res->flags = cfgtew->u.cfgte->flags;
1328 if (res->flags & IPR_IS_IOA_RESOURCE)
1329 res->type = IPR_RES_TYPE_IOAFP;
1330 else
1331 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1332
1333 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1334 sizeof(struct ipr_std_inq_data));
1335
1336 res->qmodel = IPR_QUEUEING_MODEL(res);
1337 proto = cfgtew->u.cfgte->proto;
1338 res->res_handle = cfgtew->u.cfgte->res_handle;
1339 }
1340
1341 ipr_update_ata_class(res, proto);
1342}
1343
1344/**
1345 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346 * for the resource.
1347 * @res: resource entry struct
1348 * @cfgtew: config table entry wrapper struct
1349 *
1350 * Return value:
1351 * none
1352 **/
1353static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354{
1355 struct ipr_resource_entry *gscsi_res = NULL;
1356 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357
1358 if (!ioa_cfg->sis64)
1359 return;
1360
1361 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362 clear_bit(res->target, ioa_cfg->array_ids);
1363 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364 clear_bit(res->target, ioa_cfg->vset_ids);
1365 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368 return;
1369 clear_bit(res->target, ioa_cfg->target_ids);
1370
1371 } else if (res->bus == 0)
1372 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373}
1374
1375/**
1376 * ipr_handle_config_change - Handle a config change from the adapter
1377 * @ioa_cfg: ioa config struct
1378 * @hostrcb: hostrcb
1379 *
1380 * Return value:
1381 * none
1382 **/
1383static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001384 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385{
1386 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001387 struct ipr_config_table_entry_wrapper cfgtew;
1388 __be32 cc_res_handle;
1389
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 u32 is_ndn = 1;
1391
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001392 if (ioa_cfg->sis64) {
1393 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395 } else {
1396 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397 cc_res_handle = cfgtew.u.cfgte->res_handle;
1398 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
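	/* Look the reported handle up in the in-use resource list; if it is not found, this notification describes a new device. */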
1400 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001401 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 is_ndn = 0;
1403 break;
1404 }
1405 }
1406
1407 if (is_ndn) {
1408 if (list_empty(&ioa_cfg->free_res_q)) {
1409 ipr_send_hcam(ioa_cfg,
1410 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411 hostrcb);
1412 return;
1413 }
1414
1415 res = list_entry(ioa_cfg->free_res_q.next,
1416 struct ipr_resource_entry, queue);
1417
1418 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001419 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421 }
1422
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001423 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
1425 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001428 res->res_handle = IPR_INVALID_RES_HANDLE;
Brian Kingf688f962014-12-02 12:47:37 -06001429 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001430 } else {
1431 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001433 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001434 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 res->add_to_ml = 1;
Brian Kingf688f962014-12-02 12:47:37 -06001436 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 }
1438
1439 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1440}
1441
1442/**
1443 * ipr_process_ccn - Op done function for a CCN.
1444 * @ipr_cmd: ipr command struct
1445 *
1446 * This function is the op done function for a configuration
1447 * change notification host controlled async from the adapter.
1448 *
1449 * Return value:
1450 * none
1451 **/
1452static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1453{
1454 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1455 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001456 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457
1458 list_del(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001459 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460
1461 if (ioasc) {
1462 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1463 dev_err(&ioa_cfg->pdev->dev,
1464 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1465
1466 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1467 } else {
1468 ipr_handle_config_change(ioa_cfg, hostrcb);
1469 }
1470}
1471
1472/**
Brian King8cf093e2007-04-26 16:00:14 -05001473 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1474 * @i: index into buffer
1475 * @buf: string to modify
1476 *
1477 * This function will strip all trailing whitespace, pad the end
1478 * of the string with a single space, and NULL terminate the string.
1479 *
1480 * Return value:
1481 * new length of string
1482 **/
1483static int strip_and_pad_whitespace(int i, char *buf)
1484{
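	/*
	 * @i indexes the last byte of the field just copied into @buf:
	 * trim trailing blanks, append a single space, and return the
	 * offset at which the next field should be written.
	 */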
1485 while (i && buf[i] == ' ')
1486 i--;
1487 buf[i+1] = ' ';
1488 buf[i+2] = '\0';
1489 return i + 2;
1490}
1491
1492/**
1493 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1494 * @prefix: string to print at start of printk
1495 * @hostrcb: hostrcb pointer
1496 * @vpd: vendor/product id/sn struct
1497 *
1498 * Return value:
1499 * none
1500 **/
1501static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1502 struct ipr_vpd *vpd)
1503{
1504 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1505 int i = 0;
1506
1507 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1508 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1509
1510 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1511 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1512
1513 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1514 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1515
1516 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1517}
1518
1519/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001521 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 *
1523 * Return value:
1524 * none
1525 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001526static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527{
1528 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1529 + IPR_SERIAL_NUM_LEN];
1530
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001531 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1532 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 IPR_PROD_ID_LEN);
1534 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1535 ipr_err("Vendor/Product ID: %s\n", buffer);
1536
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001537 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1539 ipr_err(" Serial Number: %s\n", buffer);
1540}
1541
1542/**
Brian King8cf093e2007-04-26 16:00:14 -05001543 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1544 * @prefix: string to print at start of printk
1545 * @hostrcb: hostrcb pointer
1546 * @vpd: vendor/product id/sn/wwn struct
1547 *
1548 * Return value:
1549 * none
1550 **/
1551static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1552 struct ipr_ext_vpd *vpd)
1553{
1554 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1555 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1556 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1557}
1558
1559/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001560 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1561 * @vpd: vendor/product id/sn/wwn struct
1562 *
1563 * Return value:
1564 * none
1565 **/
1566static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1567{
1568 ipr_log_vpd(&vpd->vpd);
1569 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1570 be32_to_cpu(vpd->wwid[1]));
1571}
1572
1573/**
1574 * ipr_log_enhanced_cache_error - Log a cache error.
1575 * @ioa_cfg: ioa config struct
1576 * @hostrcb: hostrcb struct
1577 *
1578 * Return value:
1579 * none
1580 **/
1581static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1582 struct ipr_hostrcb *hostrcb)
1583{
Wayne Boyer4565e372010-02-19 13:24:07 -08001584 struct ipr_hostrcb_type_12_error *error;
1585
1586 if (ioa_cfg->sis64)
1587 error = &hostrcb->hcam.u.error64.u.type_12_error;
1588 else
1589 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001590
1591 ipr_err("-----Current Configuration-----\n");
1592 ipr_err("Cache Directory Card Information:\n");
1593 ipr_log_ext_vpd(&error->ioa_vpd);
1594 ipr_err("Adapter Card Information:\n");
1595 ipr_log_ext_vpd(&error->cfc_vpd);
1596
1597 ipr_err("-----Expected Configuration-----\n");
1598 ipr_err("Cache Directory Card Information:\n");
1599 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1600 ipr_err("Adapter Card Information:\n");
1601 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1602
1603 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1604 be32_to_cpu(error->ioa_data[0]),
1605 be32_to_cpu(error->ioa_data[1]),
1606 be32_to_cpu(error->ioa_data[2]));
1607}
1608
1609/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 * ipr_log_cache_error - Log a cache error.
1611 * @ioa_cfg: ioa config struct
1612 * @hostrcb: hostrcb struct
1613 *
1614 * Return value:
1615 * none
1616 **/
1617static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1618 struct ipr_hostrcb *hostrcb)
1619{
1620 struct ipr_hostrcb_type_02_error *error =
1621 &hostrcb->hcam.u.error.u.type_02_error;
1622
1623 ipr_err("-----Current Configuration-----\n");
1624 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001625 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001627 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
1629 ipr_err("-----Expected Configuration-----\n");
1630 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001631 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001633 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634
1635 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1636 be32_to_cpu(error->ioa_data[0]),
1637 be32_to_cpu(error->ioa_data[1]),
1638 be32_to_cpu(error->ioa_data[2]));
1639}
1640
1641/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001642 * ipr_log_enhanced_config_error - Log a configuration error.
1643 * @ioa_cfg: ioa config struct
1644 * @hostrcb: hostrcb struct
1645 *
1646 * Return value:
1647 * none
1648 **/
1649static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1650 struct ipr_hostrcb *hostrcb)
1651{
1652 int errors_logged, i;
1653 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1654 struct ipr_hostrcb_type_13_error *error;
1655
1656 error = &hostrcb->hcam.u.error.u.type_13_error;
1657 errors_logged = be32_to_cpu(error->errors_logged);
1658
1659 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1660 be32_to_cpu(error->errors_detected), errors_logged);
1661
1662 dev_entry = error->dev;
1663
1664 for (i = 0; i < errors_logged; i++, dev_entry++) {
1665 ipr_err_separator;
1666
1667 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1668 ipr_log_ext_vpd(&dev_entry->vpd);
1669
1670 ipr_err("-----New Device Information-----\n");
1671 ipr_log_ext_vpd(&dev_entry->new_vpd);
1672
1673 ipr_err("Cache Directory Card Information:\n");
1674 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1675
1676 ipr_err("Adapter Card Information:\n");
1677 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1678 }
1679}
1680
1681/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001682 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1683 * @ioa_cfg: ioa config struct
1684 * @hostrcb: hostrcb struct
1685 *
1686 * Return value:
1687 * none
1688 **/
1689static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1690 struct ipr_hostrcb *hostrcb)
1691{
1692 int errors_logged, i;
1693 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1694 struct ipr_hostrcb_type_23_error *error;
1695 char buffer[IPR_MAX_RES_PATH_LENGTH];
1696
1697 error = &hostrcb->hcam.u.error64.u.type_23_error;
1698 errors_logged = be32_to_cpu(error->errors_logged);
1699
1700 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1701 be32_to_cpu(error->errors_detected), errors_logged);
1702
1703 dev_entry = error->dev;
1704
1705 for (i = 0; i < errors_logged; i++, dev_entry++) {
1706 ipr_err_separator;
1707
1708 		ipr_err("Device %d : %s\n", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001709 __ipr_format_res_path(dev_entry->res_path,
1710 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001711 ipr_log_ext_vpd(&dev_entry->vpd);
1712
1713 ipr_err("-----New Device Information-----\n");
1714 ipr_log_ext_vpd(&dev_entry->new_vpd);
1715
1716 ipr_err("Cache Directory Card Information:\n");
1717 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1718
1719 ipr_err("Adapter Card Information:\n");
1720 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1721 }
1722}
1723
1724/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 * ipr_log_config_error - Log a configuration error.
1726 * @ioa_cfg: ioa config struct
1727 * @hostrcb: hostrcb struct
1728 *
1729 * Return value:
1730 * none
1731 **/
1732static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1733 struct ipr_hostrcb *hostrcb)
1734{
1735 int errors_logged, i;
1736 struct ipr_hostrcb_device_data_entry *dev_entry;
1737 struct ipr_hostrcb_type_03_error *error;
1738
1739 error = &hostrcb->hcam.u.error.u.type_03_error;
1740 errors_logged = be32_to_cpu(error->errors_logged);
1741
1742 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1743 be32_to_cpu(error->errors_detected), errors_logged);
1744
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001745 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
1747 for (i = 0; i < errors_logged; i++, dev_entry++) {
1748 ipr_err_separator;
1749
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001750 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001751 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
1753 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001754 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755
1756 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001757 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758
1759 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001760 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761
1762 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1763 be32_to_cpu(dev_entry->ioa_data[0]),
1764 be32_to_cpu(dev_entry->ioa_data[1]),
1765 be32_to_cpu(dev_entry->ioa_data[2]),
1766 be32_to_cpu(dev_entry->ioa_data[3]),
1767 be32_to_cpu(dev_entry->ioa_data[4]));
1768 }
1769}
1770
1771/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001772 * ipr_log_enhanced_array_error - Log an array configuration error.
1773 * @ioa_cfg: ioa config struct
1774 * @hostrcb: hostrcb struct
1775 *
1776 * Return value:
1777 * none
1778 **/
1779static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1780 struct ipr_hostrcb *hostrcb)
1781{
1782 int i, num_entries;
1783 struct ipr_hostrcb_type_14_error *error;
1784 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1785 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1786
1787 error = &hostrcb->hcam.u.error.u.type_14_error;
1788
1789 ipr_err_separator;
1790
1791 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1792 error->protection_level,
1793 ioa_cfg->host->host_no,
1794 error->last_func_vset_res_addr.bus,
1795 error->last_func_vset_res_addr.target,
1796 error->last_func_vset_res_addr.lun);
1797
1798 ipr_err_separator;
1799
1800 array_entry = error->array_member;
1801 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001802 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001803
1804 for (i = 0; i < num_entries; i++, array_entry++) {
1805 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1806 continue;
1807
1808 if (be32_to_cpu(error->exposed_mode_adn) == i)
1809 ipr_err("Exposed Array Member %d:\n", i);
1810 else
1811 ipr_err("Array Member %d:\n", i);
1812
1813 ipr_log_ext_vpd(&array_entry->vpd);
1814 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1815 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1816 "Expected Location");
1817
1818 ipr_err_separator;
1819 }
1820}
1821
1822/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 * ipr_log_array_error - Log an array configuration error.
1824 * @ioa_cfg: ioa config struct
1825 * @hostrcb: hostrcb struct
1826 *
1827 * Return value:
1828 * none
1829 **/
1830static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1831 struct ipr_hostrcb *hostrcb)
1832{
1833 int i;
1834 struct ipr_hostrcb_type_04_error *error;
1835 struct ipr_hostrcb_array_data_entry *array_entry;
1836 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1837
1838 error = &hostrcb->hcam.u.error.u.type_04_error;
1839
1840 ipr_err_separator;
1841
1842 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1843 error->protection_level,
1844 ioa_cfg->host->host_no,
1845 error->last_func_vset_res_addr.bus,
1846 error->last_func_vset_res_addr.target,
1847 error->last_func_vset_res_addr.lun);
1848
1849 ipr_err_separator;
1850
1851 array_entry = error->array_member;
1852
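	/* The legacy overlay holds up to 18 members split across two fixed arrays; after entry 9, continue with array_member2. */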
1853 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001854 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 continue;
1856
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001857 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001859 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001862 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001864 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1865 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1866 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867
1868 ipr_err_separator;
1869
1870 if (i == 9)
1871 array_entry = error->array_member2;
1872 else
1873 array_entry++;
1874 }
1875}
1876
1877/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001878 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001879 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001880 * @data: IOA error data
1881 * @len: data length
1882 *
1883 * Return value:
1884 * none
1885 **/
Brian Kingac719ab2006-11-21 10:28:42 -06001886static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001887{
1888 int i;
1889
1890 if (len == 0)
1891 return;
1892
Brian Kingac719ab2006-11-21 10:28:42 -06001893 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1894 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1895
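	/* Dump four 32-bit words per line; i indexes words, so the printed offset is i * 4 bytes. */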
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001896 for (i = 0; i < len / 4; i += 4) {
1897 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1898 be32_to_cpu(data[i]),
1899 be32_to_cpu(data[i+1]),
1900 be32_to_cpu(data[i+2]),
1901 be32_to_cpu(data[i+3]));
1902 }
1903}
1904
1905/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001906 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1907 * @ioa_cfg: ioa config struct
1908 * @hostrcb: hostrcb struct
1909 *
1910 * Return value:
1911 * none
1912 **/
1913static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1914 struct ipr_hostrcb *hostrcb)
1915{
1916 struct ipr_hostrcb_type_17_error *error;
1917
Wayne Boyer4565e372010-02-19 13:24:07 -08001918 if (ioa_cfg->sis64)
1919 error = &hostrcb->hcam.u.error64.u.type_17_error;
1920 else
1921 error = &hostrcb->hcam.u.error.u.type_17_error;
1922
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001923 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001924 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001925
Brian King8cf093e2007-04-26 16:00:14 -05001926 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1927 be32_to_cpu(hostrcb->hcam.u.error.prc));
1928 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001929 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001930 be32_to_cpu(hostrcb->hcam.length) -
1931 (offsetof(struct ipr_hostrcb_error, u) +
1932 offsetof(struct ipr_hostrcb_type_17_error, data)));
1933}
1934
1935/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001936 * ipr_log_dual_ioa_error - Log a dual adapter error.
1937 * @ioa_cfg: ioa config struct
1938 * @hostrcb: hostrcb struct
1939 *
1940 * Return value:
1941 * none
1942 **/
1943static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1944 struct ipr_hostrcb *hostrcb)
1945{
1946 struct ipr_hostrcb_type_07_error *error;
1947
1948 error = &hostrcb->hcam.u.error.u.type_07_error;
1949 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001950 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001951
Brian King8cf093e2007-04-26 16:00:14 -05001952 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1953 be32_to_cpu(hostrcb->hcam.u.error.prc));
1954 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001955 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001956 be32_to_cpu(hostrcb->hcam.length) -
1957 (offsetof(struct ipr_hostrcb_error, u) +
1958 offsetof(struct ipr_hostrcb_type_07_error, data)));
1959}
1960
Brian King49dc6a12006-11-21 10:28:35 -06001961static const struct {
1962 u8 active;
1963 char *desc;
1964} path_active_desc[] = {
1965 { IPR_PATH_NO_INFO, "Path" },
1966 { IPR_PATH_ACTIVE, "Active path" },
1967 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1968};
1969
1970static const struct {
1971 u8 state;
1972 char *desc;
1973} path_state_desc[] = {
1974 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1975 { IPR_PATH_HEALTHY, "is healthy" },
1976 { IPR_PATH_DEGRADED, "is degraded" },
1977 { IPR_PATH_FAILED, "is failed" }
1978};
1979
1980/**
1981 * ipr_log_fabric_path - Log a fabric path error
1982 * @hostrcb: hostrcb struct
1983 * @fabric: fabric descriptor
1984 *
1985 * Return value:
1986 * none
1987 **/
1988static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1989 struct ipr_hostrcb_fabric_desc *fabric)
1990{
1991 int i, j;
1992 u8 path_state = fabric->path_state;
1993 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1994 u8 state = path_state & IPR_PATH_STATE_MASK;
1995
1996 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1997 if (path_active_desc[i].active != active)
1998 continue;
1999
2000 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2001 if (path_state_desc[j].state != state)
2002 continue;
2003
2004 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2005 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2006 path_active_desc[i].desc, path_state_desc[j].desc,
2007 fabric->ioa_port);
2008 } else if (fabric->cascaded_expander == 0xff) {
2009 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2010 path_active_desc[i].desc, path_state_desc[j].desc,
2011 fabric->ioa_port, fabric->phy);
2012 } else if (fabric->phy == 0xff) {
2013 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2014 path_active_desc[i].desc, path_state_desc[j].desc,
2015 fabric->ioa_port, fabric->cascaded_expander);
2016 } else {
2017 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2018 path_active_desc[i].desc, path_state_desc[j].desc,
2019 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2020 }
2021 return;
2022 }
2023 }
2024
2025 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2026 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2027}
2028
Wayne Boyer4565e372010-02-19 13:24:07 -08002029/**
2030 * ipr_log64_fabric_path - Log a fabric path error
2031 * @hostrcb: hostrcb struct
2032 * @fabric: fabric descriptor
2033 *
2034 * Return value:
2035 * none
2036 **/
2037static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2038 struct ipr_hostrcb64_fabric_desc *fabric)
2039{
2040 int i, j;
2041 u8 path_state = fabric->path_state;
2042 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2043 u8 state = path_state & IPR_PATH_STATE_MASK;
2044 char buffer[IPR_MAX_RES_PATH_LENGTH];
2045
2046 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2047 if (path_active_desc[i].active != active)
2048 continue;
2049
2050 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2051 if (path_state_desc[j].state != state)
2052 continue;
2053
2054 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2055 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002056 ipr_format_res_path(hostrcb->ioa_cfg,
2057 fabric->res_path,
2058 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002059 return;
2060 }
2061 }
2062
2063 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002064 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2065 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002066}
2067
Brian King49dc6a12006-11-21 10:28:35 -06002068static const struct {
2069 u8 type;
2070 char *desc;
2071} path_type_desc[] = {
2072 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2073 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2074 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2075 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2076};
2077
2078static const struct {
2079 u8 status;
2080 char *desc;
2081} path_status_desc[] = {
2082 { IPR_PATH_CFG_NO_PROB, "Functional" },
2083 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2084 { IPR_PATH_CFG_FAILED, "Failed" },
2085 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2086 { IPR_PATH_NOT_DETECTED, "Missing" },
2087 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2088};
2089
2090static const char *link_rate[] = {
2091 "unknown",
2092 "disabled",
2093 "phy reset problem",
2094 "spinup hold",
2095 "port selector",
2096 "unknown",
2097 "unknown",
2098 "unknown",
2099 "1.5Gbps",
2100 "3.0Gbps",
2101 "unknown",
2102 "unknown",
2103 "unknown",
2104 "unknown",
2105 "unknown",
2106 "unknown"
2107};
2108
2109/**
2110 * ipr_log_path_elem - Log a fabric path element.
2111 * @hostrcb: hostrcb struct
2112 * @cfg: fabric path element struct
2113 *
2114 * Return value:
2115 * none
2116 **/
2117static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2118 struct ipr_hostrcb_config_element *cfg)
2119{
2120 int i, j;
2121 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2122 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2123
2124 if (type == IPR_PATH_CFG_NOT_EXIST)
2125 return;
2126
2127 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2128 if (path_type_desc[i].type != type)
2129 continue;
2130
2131 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2132 if (path_status_desc[j].status != status)
2133 continue;
2134
2135 if (type == IPR_PATH_CFG_IOA_PORT) {
2136 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2137 path_status_desc[j].desc, path_type_desc[i].desc,
2138 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2139 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2140 } else {
2141 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2142 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2143 path_status_desc[j].desc, path_type_desc[i].desc,
2144 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2145 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2146 } else if (cfg->cascaded_expander == 0xff) {
2147 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2148 "WWN=%08X%08X\n", path_status_desc[j].desc,
2149 path_type_desc[i].desc, cfg->phy,
2150 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2151 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2152 } else if (cfg->phy == 0xff) {
2153 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2154 "WWN=%08X%08X\n", path_status_desc[j].desc,
2155 path_type_desc[i].desc, cfg->cascaded_expander,
2156 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2157 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2158 } else {
2159 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2160 "WWN=%08X%08X\n", path_status_desc[j].desc,
2161 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2162 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2163 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2164 }
2165 }
2166 return;
2167 }
2168 }
2169
2170 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2171 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2172 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2173 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2174}
2175
2176/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002177 * ipr_log64_path_elem - Log a fabric path element.
2178 * @hostrcb: hostrcb struct
2179 * @cfg: fabric path element struct
2180 *
2181 * Return value:
2182 * none
2183 **/
2184static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2185 struct ipr_hostrcb64_config_element *cfg)
2186{
2187 int i, j;
2188 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2189 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2190 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2191 char buffer[IPR_MAX_RES_PATH_LENGTH];
2192
2193 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2194 return;
2195
2196 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2197 if (path_type_desc[i].type != type)
2198 continue;
2199
2200 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2201 if (path_status_desc[j].status != status)
2202 continue;
2203
2204 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2205 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002206 ipr_format_res_path(hostrcb->ioa_cfg,
2207 cfg->res_path, buffer, sizeof(buffer)),
2208 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2209 be32_to_cpu(cfg->wwid[0]),
2210 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002211 return;
2212 }
2213 }
2214 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2215 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002216 ipr_format_res_path(hostrcb->ioa_cfg,
2217 cfg->res_path, buffer, sizeof(buffer)),
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002220}
2221
2222/**
Brian King49dc6a12006-11-21 10:28:35 -06002223 * ipr_log_fabric_error - Log a fabric error.
2224 * @ioa_cfg: ioa config struct
2225 * @hostrcb: hostrcb struct
2226 *
2227 * Return value:
2228 * none
2229 **/
2230static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2231 struct ipr_hostrcb *hostrcb)
2232{
2233 struct ipr_hostrcb_type_20_error *error;
2234 struct ipr_hostrcb_fabric_desc *fabric;
2235 struct ipr_hostrcb_config_element *cfg;
2236 int i, add_len;
2237
2238 error = &hostrcb->hcam.u.error.u.type_20_error;
2239 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2240 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2241
2242 add_len = be32_to_cpu(hostrcb->hcam.length) -
2243 (offsetof(struct ipr_hostrcb_error, u) +
2244 offsetof(struct ipr_hostrcb_type_20_error, desc));
2245
2246 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2247 ipr_log_fabric_path(hostrcb, fabric);
2248 for_each_fabric_cfg(fabric, cfg)
2249 ipr_log_path_elem(hostrcb, cfg);
2250
2251 add_len -= be16_to_cpu(fabric->length);
2252 fabric = (struct ipr_hostrcb_fabric_desc *)
2253 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2254 }
2255
Brian Kingac719ab2006-11-21 10:28:42 -06002256 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002257}
2258
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002259/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002260 * ipr_log_sis64_array_error - Log a sis64 array error.
2261 * @ioa_cfg: ioa config struct
2262 * @hostrcb: hostrcb struct
2263 *
2264 * Return value:
2265 * none
2266 **/
2267static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2268 struct ipr_hostrcb *hostrcb)
2269{
2270 int i, num_entries;
2271 struct ipr_hostrcb_type_24_error *error;
2272 struct ipr_hostrcb64_array_data_entry *array_entry;
2273 char buffer[IPR_MAX_RES_PATH_LENGTH];
2274 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2275
2276 error = &hostrcb->hcam.u.error64.u.type_24_error;
2277
2278 ipr_err_separator;
2279
2280 ipr_err("RAID %s Array Configuration: %s\n",
2281 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002282 ipr_format_res_path(ioa_cfg, error->last_res_path,
2283 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002284
2285 ipr_err_separator;
2286
2287 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002288 num_entries = min_t(u32, error->num_entries,
2289 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002290
2291 for (i = 0; i < num_entries; i++, array_entry++) {
2292
2293 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2294 continue;
2295
2296 if (error->exposed_mode_adn == i)
2297 ipr_err("Exposed Array Member %d:\n", i);
2298 else
2299 ipr_err("Array Member %d:\n", i);
2300
2302 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002303 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002304 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2305 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002306 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002307 ipr_format_res_path(ioa_cfg,
2308 array_entry->expected_res_path,
2309 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002310
2311 ipr_err_separator;
2312 }
2313}
2314
2315/**
2316 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2317 * @ioa_cfg: ioa config struct
2318 * @hostrcb: hostrcb struct
2319 *
2320 * Return value:
2321 * none
2322 **/
2323static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2324 struct ipr_hostrcb *hostrcb)
2325{
2326 struct ipr_hostrcb_type_30_error *error;
2327 struct ipr_hostrcb64_fabric_desc *fabric;
2328 struct ipr_hostrcb64_config_element *cfg;
2329 int i, add_len;
2330
2331 error = &hostrcb->hcam.u.error64.u.type_30_error;
2332
2333 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2334 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2335
2336 add_len = be32_to_cpu(hostrcb->hcam.length) -
2337 (offsetof(struct ipr_hostrcb64_error, u) +
2338 offsetof(struct ipr_hostrcb_type_30_error, desc));
2339
2340 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2341 ipr_log64_fabric_path(hostrcb, fabric);
2342 for_each_fabric_cfg(fabric, cfg)
2343 ipr_log64_path_elem(hostrcb, cfg);
2344
2345 add_len -= be16_to_cpu(fabric->length);
2346 fabric = (struct ipr_hostrcb64_fabric_desc *)
2347 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2348 }
2349
2350 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2351}
2352
2353/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 * ipr_log_generic_error - Log an adapter error.
2355 * @ioa_cfg: ioa config struct
2356 * @hostrcb: hostrcb struct
2357 *
2358 * Return value:
2359 * none
2360 **/
2361static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2362 struct ipr_hostrcb *hostrcb)
2363{
Brian Kingac719ab2006-11-21 10:28:42 -06002364 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002365 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366}
2367
2368/**
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002369 * ipr_log_sis64_device_error - Log a sis64 device error.
2370 * @ioa_cfg: ioa config struct
2371 * @hostrcb: hostrcb struct
2372 *
2373 * Return value:
2374 * none
2375 **/
2376static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2377 struct ipr_hostrcb *hostrcb)
2378{
2379 struct ipr_hostrcb_type_21_error *error;
2380 char buffer[IPR_MAX_RES_PATH_LENGTH];
2381
2382 error = &hostrcb->hcam.u.error64.u.type_21_error;
2383
2384 ipr_err("-----Failing Device Information-----\n");
2385 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2386 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2387 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2388 ipr_err("Device Resource Path: %s\n",
2389 __ipr_format_res_path(error->res_path,
2390 buffer, sizeof(buffer)));
2391 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2392 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2393 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2394 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2395 ipr_err("SCSI Sense Data:\n");
2396 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2397 	ipr_err("SCSI Command Descriptor Block:\n");
2398 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2399
2400 ipr_err("Additional IOA Data:\n");
2401 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2402}
2403
2404/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2406 * @ioasc: IOASC
2407 *
2408 * This function will return the index into the ipr_error_table
2409 * for the specified IOASC. If the IOASC is not in the table,
2410 * 0 will be returned, which points to the entry used for unknown errors.
2411 *
2412 * Return value:
2413 * index into the ipr_error_table
2414 **/
2415static u32 ipr_get_error(u32 ioasc)
2416{
2417 int i;
2418
2419 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002420 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421 return i;
2422
2423 return 0;
2424}
2425
2426/**
2427 * ipr_handle_log_data - Log an adapter error.
2428 * @ioa_cfg: ioa config struct
2429 * @hostrcb: hostrcb struct
2430 *
2431 * This function logs an adapter error to the system.
2432 *
2433 * Return value:
2434 * none
2435 **/
2436static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2437 struct ipr_hostrcb *hostrcb)
2438{
2439 u32 ioasc;
2440 int error_index;
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002441 struct ipr_hostrcb_type_21_error *error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442
2443 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2444 return;
2445
2446 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2447 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2448
Wayne Boyer4565e372010-02-19 13:24:07 -08002449 if (ioa_cfg->sis64)
2450 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2451 else
2452 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
Wayne Boyer4565e372010-02-19 13:24:07 -08002454 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2455 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2457 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002458 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 }
2460
2461 error_index = ipr_get_error(ioasc);
2462
2463 if (!ipr_error_table[error_index].log_hcam)
2464 return;
2465
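	/* Skip logging failed HW commands reported via a type-21 overlay when the sense key is ILLEGAL_REQUEST, unless the log level has been raised. */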
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002466 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2467 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2468 error = &hostrcb->hcam.u.error64.u.type_21_error;
2469
2470 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2471 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2472 return;
2473 }
2474
Brian King49dc6a12006-11-21 10:28:35 -06002475 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476
2477 /* Set indication we have logged an error */
2478 ioa_cfg->errors_logged++;
2479
Brian King933916f2007-03-29 12:43:30 -05002480 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002482 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2483 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484
2485 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 case IPR_HOST_RCB_OVERLAY_ID_2:
2487 ipr_log_cache_error(ioa_cfg, hostrcb);
2488 break;
2489 case IPR_HOST_RCB_OVERLAY_ID_3:
2490 ipr_log_config_error(ioa_cfg, hostrcb);
2491 break;
2492 case IPR_HOST_RCB_OVERLAY_ID_4:
2493 case IPR_HOST_RCB_OVERLAY_ID_6:
2494 ipr_log_array_error(ioa_cfg, hostrcb);
2495 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002496 case IPR_HOST_RCB_OVERLAY_ID_7:
2497 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2498 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002499 case IPR_HOST_RCB_OVERLAY_ID_12:
2500 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2501 break;
2502 case IPR_HOST_RCB_OVERLAY_ID_13:
2503 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2504 break;
2505 case IPR_HOST_RCB_OVERLAY_ID_14:
2506 case IPR_HOST_RCB_OVERLAY_ID_16:
2507 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2508 break;
2509 case IPR_HOST_RCB_OVERLAY_ID_17:
2510 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2511 break;
Brian King49dc6a12006-11-21 10:28:35 -06002512 case IPR_HOST_RCB_OVERLAY_ID_20:
2513 ipr_log_fabric_error(ioa_cfg, hostrcb);
2514 break;
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002515 case IPR_HOST_RCB_OVERLAY_ID_21:
2516 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2517 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002518 case IPR_HOST_RCB_OVERLAY_ID_23:
2519 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2520 break;
2521 case IPR_HOST_RCB_OVERLAY_ID_24:
2522 case IPR_HOST_RCB_OVERLAY_ID_26:
2523 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2524 break;
2525 case IPR_HOST_RCB_OVERLAY_ID_30:
2526 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2527 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002528 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002531 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 break;
2533 }
2534}
2535
2536/**
2537 * ipr_process_error - Op done function for an adapter error log.
2538 * @ipr_cmd: ipr command struct
2539 *
2540 * This function is the op done function for an error log host
2541 * controlled async from the adapter. It will log the error and
2542 * send the HCAM back to the adapter.
2543 *
2544 * Return value:
2545 * none
2546 **/
2547static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2548{
2549 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2550 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002551 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002552 u32 fd_ioasc;
2553
2554 if (ioa_cfg->sis64)
2555 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2556 else
2557 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558
2559 list_del(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002560 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561
2562 if (!ioasc) {
2563 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002564 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2565 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2567 dev_err(&ioa_cfg->pdev->dev,
2568 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2569 }
2570
2571 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2572}
2573
2574/**
2575 * ipr_timeout - An internally generated op has timed out.
2576 * @ipr_cmd: ipr command struct
2577 *
2578 * This function blocks host requests and initiates an
2579 * adapter reset.
2580 *
2581 * Return value:
2582 * none
2583 **/
2584static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2585{
2586 unsigned long lock_flags = 0;
2587 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2588
2589 ENTER;
2590 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2591
2592 ioa_cfg->errors_logged++;
2593 dev_err(&ioa_cfg->pdev->dev,
2594 "Adapter being reset due to command timeout.\n");
2595
2596 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2597 ioa_cfg->sdt_state = GET_DUMP;
2598
2599 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2600 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2601
2602 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2603 LEAVE;
2604}
2605
2606/**
2607 * ipr_oper_timeout - Adapter timed out transitioning to operational
2608 * @ipr_cmd: ipr command struct
2609 *
2610 * This function blocks host requests and initiates an
2611 * adapter reset.
2612 *
2613 * Return value:
2614 * none
2615 **/
2616static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2617{
2618 unsigned long lock_flags = 0;
2619 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2620
2621 ENTER;
2622 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2623
2624 ioa_cfg->errors_logged++;
2625 dev_err(&ioa_cfg->pdev->dev,
2626 "Adapter timed out transitioning to operational.\n");
2627
2628 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2629 ioa_cfg->sdt_state = GET_DUMP;
2630
2631 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2632 if (ipr_fastfail)
2633 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2634 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2635 }
2636
2637 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2638 LEAVE;
2639}
2640
2641/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 * ipr_find_ses_entry - Find matching SES in SES table
2643 * @res: resource entry struct of SES
2644 *
2645 * Return value:
2646 * pointer to SES table entry / NULL on failure
2647 **/
2648static const struct ipr_ses_table_entry *
2649ipr_find_ses_entry(struct ipr_resource_entry *res)
2650{
2651 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002652 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2654
2655 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2656 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
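			/*
			 * Descriptive note: an 'X' in compare_product_id_byte
			 * means this byte of the product ID must match the
			 * table entry exactly; any other value marks the byte
			 * as "don't care" and it matches unconditionally.
			 */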
2657 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002658 vpids = &res->std_inq_data.vpids;
2659 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 matches++;
2661 else
2662 break;
2663 } else
2664 matches++;
2665 }
2666
2667 if (matches == IPR_PROD_ID_LEN)
2668 return ste;
2669 }
2670
2671 return NULL;
2672}
2673
2674/**
2675 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2676 * @ioa_cfg: ioa config struct
2677 * @bus: SCSI bus
2678 * @bus_width: bus width
2679 *
2680 * Return value:
2681 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2682 * For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec is
2683 * twice the bus speed in MHz (e.g. for a wide enabled bus,
2684 * max 160MHz = max 320MB/sec).
2685 **/
2686static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2687{
2688 struct ipr_resource_entry *res;
2689 const struct ipr_ses_table_entry *ste;
2690 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2691
2692 /* Loop through each config table entry in the config table buffer */
2693 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002694 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 continue;
2696
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002697 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 continue;
2699
2700 if (!(ste = ipr_find_ses_entry(res)))
2701 continue;
2702
2703 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2704 }
2705
2706 return max_xfer_rate;
2707}
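/*
 * Worked example for the rate math above (illustrative only; it assumes,
 * as the SES table usage suggests, that max_bus_speed_limit is expressed
 * in MB/sec): an SES that limits a 16-bit wide bus to 320 MB/sec yields
 * (320 * 10) / (16 / 8) = 1600, i.e. 160 MHz in the 100 kHz units this
 * function returns.
 */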
2708
2709/**
2710 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2711 * @ioa_cfg: ioa config struct
2712 * @max_delay: max delay in micro-seconds to wait
2713 *
2714 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2715 *
2716 * Return value:
2717 * 0 on success / other on failure
2718 **/
2719static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2720{
2721 volatile u32 pcii_reg;
2722 int delay = 1;
2723
2724 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2725 while (delay < max_delay) {
2726 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2727
2728 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2729 return 0;
2730
2731 /* udelay cannot be used if delay is more than a few milliseconds */
2732 if ((delay / 1000) > MAX_UDELAY_MS)
2733 mdelay(delay / 1000);
2734 else
2735 udelay(delay);
2736
2737 delay += delay;
2738 }
2739 return -EIO;
2740}
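/*
 * Descriptive note on the loop above: the poll interval doubles on every
 * pass (1, 2, 4, ... microseconds), so the total time spent waiting is
 * bounded by roughly twice max_delay before -EIO is returned.
 */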
2741
2742/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002743 * ipr_get_sis64_dump_data_section - Dump IOA memory
2744 * @ioa_cfg: ioa config struct
2745 * @start_addr: adapter address to dump
2746 * @dest: destination kernel buffer
2747 * @length_in_words: length to dump in 4 byte words
2748 *
2749 * Return value:
2750 * 0 on success
2751 **/
2752static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2753 u32 start_addr,
2754 __be32 *dest, u32 length_in_words)
2755{
2756 int i;
2757
2758 for (i = 0; i < length_in_words; i++) {
2759 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2760 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2761 dest++;
2762 }
2763
2764 return 0;
2765}
2766
2767/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 * ipr_get_ldump_data_section - Dump IOA memory
2769 * @ioa_cfg: ioa config struct
2770 * @start_addr: adapter address to dump
2771 * @dest: destination kernel buffer
2772 * @length_in_words: length to dump in 4 byte words
2773 *
2774 * Return value:
2775 * 0 on success / -EIO on failure
2776 **/
2777static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2778 u32 start_addr,
2779 __be32 *dest, u32 length_in_words)
2780{
2781 volatile u32 temp_pcii_reg;
2782 int i, delay = 0;
2783
Wayne Boyerdcbad002010-02-19 13:24:14 -08002784 if (ioa_cfg->sis64)
2785 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2786 dest, length_in_words);
2787
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 /* Write IOA interrupt reg starting LDUMP state */
2789 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002790 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791
2792 /* Wait for IO debug acknowledge */
2793 if (ipr_wait_iodbg_ack(ioa_cfg,
2794 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2795 dev_err(&ioa_cfg->pdev->dev,
2796 "IOA dump long data transfer timeout\n");
2797 return -EIO;
2798 }
2799
2800 /* Signal LDUMP interlocked - clear IO debug ack */
2801 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2802 ioa_cfg->regs.clr_interrupt_reg);
2803
2804 /* Write Mailbox with starting address */
2805 writel(start_addr, ioa_cfg->ioa_mailbox);
2806
2807 /* Signal address valid - clear IOA Reset alert */
2808 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002809 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810
2811 for (i = 0; i < length_in_words; i++) {
2812 /* Wait for IO debug acknowledge */
2813 if (ipr_wait_iodbg_ack(ioa_cfg,
2814 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2815 dev_err(&ioa_cfg->pdev->dev,
2816 "IOA dump short data transfer timeout\n");
2817 return -EIO;
2818 }
2819
2820 /* Read data from mailbox and increment destination pointer */
2821 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2822 dest++;
2823
2824 /* For all but the last word of data, signal data received */
2825 if (i < (length_in_words - 1)) {
2826 /* Signal dump data received - Clear IO debug Ack */
2827 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2828 ioa_cfg->regs.clr_interrupt_reg);
2829 }
2830 }
2831
2832 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2833 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002834 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835
2836 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002837 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838
2839 /* Signal dump data received - Clear IO debug Ack */
2840 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2841 ioa_cfg->regs.clr_interrupt_reg);
2842
2843 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2844 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2845 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002846 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847
2848 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2849 return 0;
2850
2851 udelay(10);
2852 delay += 10;
2853 }
2854
2855 return 0;
2856}
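/*
 * Descriptive summary of the LDUMP handshake driven above: the driver
 * alerts the IOA (reset alert + IO debug alert), hands it the start
 * address through the mailbox, pulls the dump out one 32-bit word per
 * IO debug acknowledge cycle, then re-raises reset alert and waits for
 * the IOA to clear it to signal LDUMP exit.
 */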
2857
2858#ifdef CONFIG_SCSI_IPR_DUMP
2859/**
2860 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2861 * @ioa_cfg: ioa config struct
2862 * @pci_address: adapter address
2863 * @length: length of data to copy
2864 *
2865 * Copy data from PCI adapter to kernel buffer.
2866 * Note: length MUST be a 4 byte multiple
2867 * Return value:
2868 * 0 on success / other on failure
2869 **/
2870static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2871 unsigned long pci_address, u32 length)
2872{
2873 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002874 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875 __be32 *page;
2876 unsigned long lock_flags = 0;
2877 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2878
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002879 if (ioa_cfg->sis64)
2880 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2881 else
2882 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2883
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002885 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 if (ioa_dump->page_offset >= PAGE_SIZE ||
2887 ioa_dump->page_offset == 0) {
2888 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2889
2890 if (!page) {
2891 ipr_trace;
2892 return bytes_copied;
2893 }
2894
2895 ioa_dump->page_offset = 0;
2896 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2897 ioa_dump->next_page_index++;
2898 } else
2899 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2900
2901 rem_len = length - bytes_copied;
2902 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2903 cur_len = min(rem_len, rem_page_len);
2904
2905 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2906 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2907 rc = -EIO;
2908 } else {
2909 rc = ipr_get_ldump_data_section(ioa_cfg,
2910 pci_address + bytes_copied,
2911 &page[ioa_dump->page_offset / 4],
2912 (cur_len / sizeof(u32)));
2913 }
2914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2915
2916 if (!rc) {
2917 ioa_dump->page_offset += cur_len;
2918 bytes_copied += cur_len;
2919 } else {
2920 ipr_trace;
2921 break;
2922 }
2923 schedule();
2924 }
2925
2926 return bytes_copied;
2927}
2928
2929/**
2930 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2931 * @hdr: dump entry header struct
2932 *
2933 * Return value:
2934 * nothing
2935 **/
2936static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2937{
2938 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2939 hdr->num_elems = 1;
2940 hdr->offset = sizeof(*hdr);
2941 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2942}
2943
2944/**
2945 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2946 * @ioa_cfg: ioa config struct
2947 * @driver_dump: driver dump struct
2948 *
2949 * Return value:
2950 * nothing
2951 **/
2952static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2953 struct ipr_driver_dump *driver_dump)
2954{
2955 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2956
2957 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2958 driver_dump->ioa_type_entry.hdr.len =
2959 sizeof(struct ipr_dump_ioa_type_entry) -
2960 sizeof(struct ipr_dump_entry_header);
2961 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2962 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2963 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2964 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2965 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2966 ucode_vpd->minor_release[1];
2967 driver_dump->hdr.num_entries++;
2968}
2969
2970/**
2971 * ipr_dump_version_data - Fill in the driver version in the dump.
2972 * @ioa_cfg: ioa config struct
2973 * @driver_dump: driver dump struct
2974 *
2975 * Return value:
2976 * nothing
2977 **/
2978static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2979 struct ipr_driver_dump *driver_dump)
2980{
2981 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2982 driver_dump->version_entry.hdr.len =
2983 sizeof(struct ipr_dump_version_entry) -
2984 sizeof(struct ipr_dump_entry_header);
2985 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2986 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2987 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2988 driver_dump->hdr.num_entries++;
2989}
2990
2991/**
2992 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2993 * @ioa_cfg: ioa config struct
2994 * @driver_dump: driver dump struct
2995 *
2996 * Return value:
2997 * nothing
2998 **/
2999static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3000 struct ipr_driver_dump *driver_dump)
3001{
3002 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3003 driver_dump->trace_entry.hdr.len =
3004 sizeof(struct ipr_dump_trace_entry) -
3005 sizeof(struct ipr_dump_entry_header);
3006 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3007 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3008 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3009 driver_dump->hdr.num_entries++;
3010}
3011
3012/**
3013 * ipr_dump_location_data - Fill in the IOA location in the dump.
3014 * @ioa_cfg: ioa config struct
3015 * @driver_dump: driver dump struct
3016 *
3017 * Return value:
3018 * nothing
3019 **/
3020static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3021 struct ipr_driver_dump *driver_dump)
3022{
3023 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3024 driver_dump->location_entry.hdr.len =
3025 sizeof(struct ipr_dump_location_entry) -
3026 sizeof(struct ipr_dump_entry_header);
3027 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3028 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01003029 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030 driver_dump->hdr.num_entries++;
3031}
3032
3033/**
3034 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3035 * @ioa_cfg: ioa config struct
3036 * @dump: dump struct
3037 *
3038 * Return value:
3039 * nothing
3040 **/
3041static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3042{
3043 unsigned long start_addr, sdt_word;
3044 unsigned long lock_flags = 0;
3045 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3046 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003047 u32 num_entries, max_num_entries, start_off, end_off;
3048 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08003050 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 int i;
3052
3053 ENTER;
3054
3055 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3056
Brian King41e9a692011-09-21 08:51:11 -05003057 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3059 return;
3060 }
3061
Wayne Boyer110def82010-11-04 09:36:16 -07003062 if (ioa_cfg->sis64) {
3063 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3064 ssleep(IPR_DUMP_DELAY_SECONDS);
3065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3066 }
3067
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068 start_addr = readl(ioa_cfg->ioa_mailbox);
3069
Wayne Boyerdcbad002010-02-19 13:24:14 -08003070 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071 dev_err(&ioa_cfg->pdev->dev,
3072 "Invalid dump table format: %lx\n", start_addr);
3073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3074 return;
3075 }
3076
3077 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3078
3079 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3080
3081 /* Initialize the overall dump header */
3082 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3083 driver_dump->hdr.num_entries = 1;
3084 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3085 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3086 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3087 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3088
3089 ipr_dump_version_data(ioa_cfg, driver_dump);
3090 ipr_dump_location_data(ioa_cfg, driver_dump);
3091 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3092 ipr_dump_trace_data(ioa_cfg, driver_dump);
3093
3094 /* Update dump_header */
3095 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3096
3097 /* IOA Dump entry */
3098 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099 ioa_dump->hdr.len = 0;
3100 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3101 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3102
3103 /* First entries in sdt are actually a list of dump addresses and
3104 lengths to gather the real dump data. sdt represents the pointer
3105 to the ioa generated dump table. Dump data will be extracted based
3106 on entries in this table */
3107 sdt = &ioa_dump->sdt;
3108
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003109 if (ioa_cfg->sis64) {
3110 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3111 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3112 } else {
3113 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3114 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3115 }
3116
3117 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3118 (max_num_entries * sizeof(struct ipr_sdt_entry));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003120 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121
3122 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003123 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3124 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 dev_err(&ioa_cfg->pdev->dev,
3126 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3127 rc, be32_to_cpu(sdt->hdr.state));
3128 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3129 ioa_cfg->sdt_state = DUMP_OBTAINED;
3130 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3131 return;
3132 }
3133
3134 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3135
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003136 if (num_entries > max_num_entries)
3137 num_entries = max_num_entries;
3138
3139 /* Update dump length to the actual data to be copied */
3140 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3141 if (ioa_cfg->sis64)
3142 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3143 else
3144 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145
3146 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3147
3148 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003149 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003150 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3151 break;
3152 }
3153
3154 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003155 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3156 if (ioa_cfg->sis64)
3157 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3158 else {
3159 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3160 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161
Wayne Boyerdcbad002010-02-19 13:24:14 -08003162 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3163 bytes_to_copy = end_off - start_off;
3164 else
3165 valid = 0;
3166 }
3167 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003168 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3170 continue;
3171 }
3172
3173 /* Copy data from adapter to driver buffers */
3174 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3175 bytes_to_copy);
3176
3177 ioa_dump->hdr.len += bytes_copied;
3178
3179 if (bytes_copied != bytes_to_copy) {
3180 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3181 break;
3182 }
3183 }
3184 }
3185 }
3186
3187 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3188
3189 /* Update dump_header */
3190 driver_dump->hdr.len += ioa_dump->hdr.len;
3191 wmb();
3192 ioa_cfg->sdt_state = DUMP_OBTAINED;
3193 LEAVE;
3194}
3195
3196#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003197#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198#endif
3199
3200/**
3201 * ipr_release_dump - Free adapter dump memory
3202 * @kref: kref struct
3203 *
3204 * Return value:
3205 * nothing
3206 **/
3207static void ipr_release_dump(struct kref *kref)
3208{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003209 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3211 unsigned long lock_flags = 0;
3212 int i;
3213
3214 ENTER;
3215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3216 ioa_cfg->dump = NULL;
3217 ioa_cfg->sdt_state = INACTIVE;
3218 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3219
3220 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3221 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3222
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003223 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224 kfree(dump);
3225 LEAVE;
3226}
3227
3228/**
3229 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003230 * @work: work struct (embedded in the ioa config struct)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231 *
3232 * Called at task level from a work thread. This function takes care
3233 * of adding and removing devices from the mid-layer as configuration
3234 * changes are detected by the adapter.
3235 *
3236 * Return value:
3237 * nothing
3238 **/
David Howellsc4028952006-11-22 14:57:56 +00003239static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240{
3241 unsigned long lock_flags;
3242 struct ipr_resource_entry *res;
3243 struct scsi_device *sdev;
3244 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003245 struct ipr_ioa_cfg *ioa_cfg =
3246 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 u8 bus, target, lun;
3248 int did_work;
3249
3250 ENTER;
3251 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3252
Brian King41e9a692011-09-21 08:51:11 -05003253 if (ioa_cfg->sdt_state == READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 dump = ioa_cfg->dump;
3255 if (!dump) {
3256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257 return;
3258 }
3259 kref_get(&dump->kref);
3260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3261 ipr_get_ioa_dump(ioa_cfg, dump);
3262 kref_put(&dump->kref, ipr_release_dump);
3263
3264 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King4c647e92011-10-15 09:08:56 -05003265 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3267 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3268 return;
3269 }
3270
3271restart:
3272 do {
3273 did_work = 0;
Brian Kingf688f962014-12-02 12:47:37 -06003274 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276 return;
3277 }
3278
3279 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3280 if (res->del_from_ml && res->sdev) {
3281 did_work = 1;
3282 sdev = res->sdev;
3283 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003284 if (!res->add_to_ml)
3285 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3286 else
3287 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3289 scsi_remove_device(sdev);
3290 scsi_device_put(sdev);
3291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3292 }
3293 break;
3294 }
3295 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003296 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297
3298 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3299 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003300 bus = res->bus;
3301 target = res->target;
3302 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003303 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305 scsi_add_device(ioa_cfg->host, bus, target, lun);
3306 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3307 goto restart;
3308 }
3309 }
3310
Brian Kingf688f962014-12-02 12:47:37 -06003311 ioa_cfg->scan_done = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003313 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003314 LEAVE;
3315}
3316
3317#ifdef CONFIG_SCSI_IPR_TRACE
3318/**
3319 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003320 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003322 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 * @buf: buffer
3324 * @off: offset
3325 * @count: buffer size
3326 *
3327 * Return value:
3328 * number of bytes printed to buffer
3329 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003330static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003331 struct bin_attribute *bin_attr,
3332 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333{
Tony Jonesee959b02008-02-22 00:13:36 +01003334 struct device *dev = container_of(kobj, struct device, kobj);
3335 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3337 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003338 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339
3340 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003341 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3342 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003344
3345 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346}
3347
3348static struct bin_attribute ipr_trace_attr = {
3349 .attr = {
3350 .name = "trace",
3351 .mode = S_IRUGO,
3352 },
3353 .size = 0,
3354 .read = ipr_read_trace,
3355};
3356#endif
3357
3358/**
3359 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003360 * @dev: class device struct
3361 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 *
3363 * Return value:
3364 * number of bytes printed to buffer
3365 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003366static ssize_t ipr_show_fw_version(struct device *dev,
3367 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368{
Tony Jonesee959b02008-02-22 00:13:36 +01003369 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3371 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3372 unsigned long lock_flags = 0;
3373 int len;
3374
3375 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3376 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3377 ucode_vpd->major_release, ucode_vpd->card_type,
3378 ucode_vpd->minor_release[0],
3379 ucode_vpd->minor_release[1]);
3380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3381 return len;
3382}
3383
Tony Jonesee959b02008-02-22 00:13:36 +01003384static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 .attr = {
3386 .name = "fw_version",
3387 .mode = S_IRUGO,
3388 },
3389 .show = ipr_show_fw_version,
3390};
3391
3392/**
3393 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003394 * @dev: class device struct
3395 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 *
3397 * Return value:
3398 * number of bytes printed to buffer
3399 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003400static ssize_t ipr_show_log_level(struct device *dev,
3401 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402{
Tony Jonesee959b02008-02-22 00:13:36 +01003403 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3405 unsigned long lock_flags = 0;
3406 int len;
3407
3408 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3409 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411 return len;
3412}
3413
3414/**
3415 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003416 * @dev: class device struct
3417 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 *
3419 * Return value:
3420 * 	number of bytes consumed from the buffer
3421 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003422static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003423 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 const char *buf, size_t count)
3425{
Tony Jonesee959b02008-02-22 00:13:36 +01003426 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3428 unsigned long lock_flags = 0;
3429
3430 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3431 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433 return strlen(buf);
3434}
3435
Tony Jonesee959b02008-02-22 00:13:36 +01003436static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437 .attr = {
3438 .name = "log_level",
3439 .mode = S_IRUGO | S_IWUSR,
3440 },
3441 .show = ipr_show_log_level,
3442 .store = ipr_store_log_level
3443};
3444
3445/**
3446 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003447 * @dev: device struct
3448 * @buf: buffer
3449 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 *
3451 * This function will reset the adapter and wait a reasonable
3452 * amount of time for any errors that the adapter might log.
3453 *
3454 * Return value:
3455 * count on success / other on failure
3456 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003457static ssize_t ipr_store_diagnostics(struct device *dev,
3458 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459 const char *buf, size_t count)
3460{
Tony Jonesee959b02008-02-22 00:13:36 +01003461 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463 unsigned long lock_flags = 0;
3464 int rc = count;
3465
3466 if (!capable(CAP_SYS_ADMIN))
3467 return -EACCES;
3468
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003470 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3472 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3473 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3474 }
3475
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 ioa_cfg->errors_logged = 0;
3477 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3478
3479 if (ioa_cfg->in_reset_reload) {
3480 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3481 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3482
3483 /* Wait for a second for any errors to be logged */
3484 msleep(1000);
3485 } else {
3486 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3487 return -EIO;
3488 }
3489
3490 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3491 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3492 rc = -EIO;
3493 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3494
3495 return rc;
3496}
3497
Tony Jonesee959b02008-02-22 00:13:36 +01003498static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 .attr = {
3500 .name = "run_diagnostics",
3501 .mode = S_IWUSR,
3502 },
3503 .store = ipr_store_diagnostics
3504};
3505
3506/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003507 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003508 * @dev: device struct
3509 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003510 *
3511 * Return value:
3512 * number of bytes printed to buffer
3513 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003514static ssize_t ipr_show_adapter_state(struct device *dev,
3515 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003516{
Tony Jonesee959b02008-02-22 00:13:36 +01003517 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003518 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3519 unsigned long lock_flags = 0;
3520 int len;
3521
3522 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003523 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003524 len = snprintf(buf, PAGE_SIZE, "offline\n");
3525 else
3526 len = snprintf(buf, PAGE_SIZE, "online\n");
3527 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3528 return len;
3529}
3530
3531/**
3532 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003533 * @dev: device struct
3534 * @buf: buffer
3535 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003536 *
3537 * This function will change the adapter's state.
3538 *
3539 * Return value:
3540 * count on success / other on failure
3541 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003542static ssize_t ipr_store_adapter_state(struct device *dev,
3543 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003544 const char *buf, size_t count)
3545{
Tony Jonesee959b02008-02-22 00:13:36 +01003546 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003547 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3548 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003549 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003550
3551 if (!capable(CAP_SYS_ADMIN))
3552 return -EACCES;
3553
3554 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003555 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3556 !strncmp(buf, "online", 6)) {
3557 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3558 spin_lock(&ioa_cfg->hrrq[i]._lock);
3559 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3560 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3561 }
3562 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003563 ioa_cfg->reset_retries = 0;
3564 ioa_cfg->in_ioa_bringdown = 0;
3565 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3566 }
3567 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3568 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3569
3570 return result;
3571}
3572
Tony Jonesee959b02008-02-22 00:13:36 +01003573static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003574 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003575 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003576 .mode = S_IRUGO | S_IWUSR,
3577 },
3578 .show = ipr_show_adapter_state,
3579 .store = ipr_store_adapter_state
3580};
3581
3582/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003584 * @dev: device struct
3585 * @buf: buffer
3586 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587 *
3588 * This function will reset the adapter.
3589 *
3590 * Return value:
3591 * count on success / other on failure
3592 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003593static ssize_t ipr_store_reset_adapter(struct device *dev,
3594 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 const char *buf, size_t count)
3596{
Tony Jonesee959b02008-02-22 00:13:36 +01003597 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3599 unsigned long lock_flags;
3600 int result = count;
3601
3602 if (!capable(CAP_SYS_ADMIN))
3603 return -EACCES;
3604
3605 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3606 if (!ioa_cfg->in_reset_reload)
3607 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3608 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3609 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3610
3611 return result;
3612}
3613
Tony Jonesee959b02008-02-22 00:13:36 +01003614static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615 .attr = {
3616 .name = "reset_host",
3617 .mode = S_IWUSR,
3618 },
3619 .store = ipr_store_reset_adapter
3620};
3621
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003622static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3623 /**
3624 * ipr_show_iopoll_weight - Show ipr polling mode
3625 * @dev: class device struct
3626 * @buf: buffer
3627 *
3628 * Return value:
3629 * number of bytes printed to buffer
3630 **/
3631static ssize_t ipr_show_iopoll_weight(struct device *dev,
3632 struct device_attribute *attr, char *buf)
3633{
3634 struct Scsi_Host *shost = class_to_shost(dev);
3635 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3636 unsigned long lock_flags = 0;
3637 int len;
3638
3639 spin_lock_irqsave(shost->host_lock, lock_flags);
3640 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3641 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3642
3643 return len;
3644}
3645
3646/**
3647 * ipr_store_iopoll_weight - Change the adapter's polling mode
3648 * @dev: class device struct
3649 * @buf: buffer
3650 *
3651 * Return value:
3652 * 	number of bytes consumed from the buffer
3653 **/
3654static ssize_t ipr_store_iopoll_weight(struct device *dev,
3655 struct device_attribute *attr,
3656 const char *buf, size_t count)
3657{
3658 struct Scsi_Host *shost = class_to_shost(dev);
3659 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3660 unsigned long user_iopoll_weight;
3661 unsigned long lock_flags = 0;
3662 int i;
3663
3664 if (!ioa_cfg->sis64) {
3665 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3666 return -EINVAL;
3667 }
3668 if (kstrtoul(buf, 10, &user_iopoll_weight))
3669 return -EINVAL;
3670
3671 if (user_iopoll_weight > 256) {
3672		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be 256 or less\n");
3673 return -EINVAL;
3674 }
3675
3676 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3677		dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight is already set to that value\n");
3678 return strlen(buf);
3679 }
3680
Jens Axboe89f8b332014-03-13 09:38:42 -06003681 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003682 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3683 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3684 }
3685
3686 spin_lock_irqsave(shost->host_lock, lock_flags);
3687 ioa_cfg->iopoll_weight = user_iopoll_weight;
Jens Axboe89f8b332014-03-13 09:38:42 -06003688 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003689 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3690 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3691 ioa_cfg->iopoll_weight, ipr_iopoll);
3692 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3693 }
3694 }
3695 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3696
3697 return strlen(buf);
3698}
3699
3700static struct device_attribute ipr_iopoll_weight_attr = {
3701 .attr = {
3702 .name = "iopoll_weight",
3703 .mode = S_IRUGO | S_IWUSR,
3704 },
3705 .show = ipr_show_iopoll_weight,
3706 .store = ipr_store_iopoll_weight
3707};
3708
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709/**
3710 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3711 * @buf_len: buffer length
3712 *
3713 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3714 * list to use for microcode download
3715 *
3716 * Return value:
3717 * pointer to sglist / NULL on failure
3718 **/
3719static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3720{
3721 int sg_size, order, bsize_elem, num_elem, i, j;
3722 struct ipr_sglist *sglist;
3723 struct scatterlist *scatterlist;
3724 struct page *page;
3725
3726 /* Get the minimum size per scatter/gather element */
3727 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3728
3729 /* Get the actual size per element */
3730 order = get_order(sg_size);
3731
3732 /* Determine the actual number of bytes per element */
3733 bsize_elem = PAGE_SIZE * (1 << order);
3734
3735 /* Determine the actual number of sg entries needed */
3736 if (buf_len % bsize_elem)
3737 num_elem = (buf_len / bsize_elem) + 1;
3738 else
3739 num_elem = buf_len / bsize_elem;
3740
3741 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003742 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743 (sizeof(struct scatterlist) * (num_elem - 1)),
3744 GFP_KERNEL);
3745
3746 if (sglist == NULL) {
3747 ipr_trace;
3748 return NULL;
3749 }
3750
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 scatterlist = sglist->scatterlist;
Jens Axboe45711f12007-10-22 21:19:53 +02003752 sg_init_table(scatterlist, num_elem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753
3754 sglist->order = order;
3755 sglist->num_sg = num_elem;
3756
3757 /* Allocate a bunch of sg elements */
3758 for (i = 0; i < num_elem; i++) {
3759 page = alloc_pages(GFP_KERNEL, order);
3760 if (!page) {
3761 ipr_trace;
3762
3763 /* Free up what we already allocated */
3764 for (j = i - 1; j >= 0; j--)
Jens Axboe45711f12007-10-22 21:19:53 +02003765 __free_pages(sg_page(&scatterlist[j]), order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766 kfree(sglist);
3767 return NULL;
3768 }
3769
Jens Axboe642f1492007-10-24 11:20:47 +02003770 sg_set_page(&scatterlist[i], page, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771 }
3772
3773 return sglist;
3774}
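/*
 * Sizing example for the allocation above (illustrative; assumes 4 KB
 * pages and an IPR_MAX_SGLIST of 64): a 2 MB microcode image gives
 * sg_size = 2 MB / 63 ~= 33 KB, which get_order() rounds up to order 4
 * (64 KB per element), so 32 elements of 64 KB each hold the image.
 */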
3775
3776/**
3777 * ipr_free_ucode_buffer - Frees a microcode download buffer
3778 * @sglist: scatter/gather list pointer
3779 *
3780 * Free a DMA'able ucode download buffer previously allocated with
3781 * ipr_alloc_ucode_buffer
3782 *
3783 * Return value:
3784 * nothing
3785 **/
3786static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3787{
3788 int i;
3789
3790 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003791 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792
3793 kfree(sglist);
3794}
3795
3796/**
3797 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3798 * @sglist: scatter/gather list pointer
3799 * @buffer: buffer pointer
3800 * @len: buffer length
3801 *
3802 * Copy a microcode image from a user buffer into a buffer allocated by
3803 * ipr_alloc_ucode_buffer
3804 *
3805 * Return value:
3806 * 0 on success / other on failure
3807 **/
3808static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3809 u8 *buffer, u32 len)
3810{
3811 int bsize_elem, i, result = 0;
3812 struct scatterlist *scatterlist;
3813 void *kaddr;
3814
3815 /* Determine the actual number of bytes per element */
3816 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3817
3818 scatterlist = sglist->scatterlist;
3819
3820 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003821 struct page *page = sg_page(&scatterlist[i]);
3822
3823 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003825 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826
3827 scatterlist[i].length = bsize_elem;
3828
3829 if (result != 0) {
3830 ipr_trace;
3831 return result;
3832 }
3833 }
3834
3835 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003836 struct page *page = sg_page(&scatterlist[i]);
3837
3838 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003840 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003841
3842 scatterlist[i].length = len % bsize_elem;
3843 }
3844
3845 sglist->buffer_len = len;
3846 return result;
3847}
3848
3849/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003850 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3851 * @ipr_cmd: ipr command struct
3852 * @sglist: scatter/gather list
3853 *
3854 * Builds a microcode download IOA data list (IOADL).
3855 *
3856 **/
3857static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3858 struct ipr_sglist *sglist)
3859{
3860 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3861 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3862 struct scatterlist *scatterlist = sglist->scatterlist;
3863 int i;
3864
3865 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3866 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3867 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3868
3869 ioarcb->ioadl_len =
3870 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3871 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3872 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3873 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3874 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3875 }
3876
3877 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3878}
3879
3880/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003881 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 * @ipr_cmd: ipr command struct
3883 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003884 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003885 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003888static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3889 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003892 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 struct scatterlist *scatterlist = sglist->scatterlist;
3894 int i;
3895
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003896 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003898 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3899
3900 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3902
3903 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3904 ioadl[i].flags_and_data_len =
3905 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3906 ioadl[i].address =
3907 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3908 }
3909
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003910 ioadl[i-1].flags_and_data_len |=
3911 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3912}
3913
3914/**
3915 * ipr_update_ioa_ucode - Update IOA's microcode
3916 * @ioa_cfg: ioa config struct
3917 * @sglist: scatter/gather list
3918 *
3919 * Initiate an adapter reset to update the IOA's microcode
3920 *
3921 * Return value:
3922 * 0 on success / -EIO on failure
3923 **/
3924static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3925 struct ipr_sglist *sglist)
3926{
3927 unsigned long lock_flags;
3928
3929 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003930 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003931 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3932 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3933 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3934 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003935
3936 if (ioa_cfg->ucode_sglist) {
3937 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3938 dev_err(&ioa_cfg->pdev->dev,
3939 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940 return -EIO;
3941 }
3942
Anton Blanchardd73341b2014-10-30 17:27:08 -05003943 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3944 sglist->scatterlist, sglist->num_sg,
3945 DMA_TO_DEVICE);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003946
3947 if (!sglist->num_dma_sg) {
3948 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3949 dev_err(&ioa_cfg->pdev->dev,
3950 "Failed to map microcode download buffer!\n");
3951 return -EIO;
3952 }
3953
3954 ioa_cfg->ucode_sglist = sglist;
3955 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3957 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3958
3959 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3960 ioa_cfg->ucode_sglist = NULL;
3961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 return 0;
3963}
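/*
 * Illustrative sketch (not part of the driver): how the microcode helpers
 * above fit together for a one-shot download. The real entry point is
 * ipr_store_update_fw() below; the function name and the "image"/"len"
 * parameters here are assumptions made for the example.
 */
static int __maybe_unused ipr_example_download_ucode(struct ipr_ioa_cfg *ioa_cfg,
						     u8 *image, u32 len)
{
	struct ipr_sglist *sglist;
	int rc;

	/* Chunked, page-order sized scatter/gather buffer for the image */
	sglist = ipr_alloc_ucode_buffer(len);
	if (!sglist)
		return -ENOMEM;

	/* Copy the image into the buffer, then reset the IOA to apply it */
	rc = ipr_copy_ucode_buffer(sglist, image, len);
	if (!rc)
		rc = ipr_update_ioa_ucode(ioa_cfg, sglist);

	ipr_free_ucode_buffer(sglist);
	return rc;
}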
3964
3965/**
3966 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003967 * @dev: device struct
3968 * @buf: buffer
3969 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970 *
3971 * This function will update the firmware on the adapter.
3972 *
3973 * Return value:
3974 * count on success / other on failure
3975 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003976static ssize_t ipr_store_update_fw(struct device *dev,
3977 struct device_attribute *attr,
3978 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979{
Tony Jonesee959b02008-02-22 00:13:36 +01003980 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3982 struct ipr_ucode_image_header *image_hdr;
3983 const struct firmware *fw_entry;
3984 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 char fname[100];
3986 char *src;
3987 int len, result, dnld_size;
3988
3989 if (!capable(CAP_SYS_ADMIN))
3990 return -EACCES;
3991
3992 len = snprintf(fname, 99, "%s", buf);
3993 fname[len-1] = '\0';
3994
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003995 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003996 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3997 return -EIO;
3998 }
3999
4000 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4001
Linus Torvalds1da177e2005-04-16 15:20:36 -07004002 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4003 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4004 sglist = ipr_alloc_ucode_buffer(dnld_size);
4005
4006 if (!sglist) {
4007 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4008 release_firmware(fw_entry);
4009 return -ENOMEM;
4010 }
4011
4012 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4013
4014 if (result) {
4015 dev_err(&ioa_cfg->pdev->dev,
4016 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004017 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 }
4019
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07004020 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4021
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004022 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004024 if (!result)
4025 result = count;
4026out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027 ipr_free_ucode_buffer(sglist);
4028 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004029 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004030}
4031
Tony Jonesee959b02008-02-22 00:13:36 +01004032static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 .attr = {
4034 .name = "update_fw",
4035 .mode = S_IWUSR,
4036 },
4037 .store = ipr_store_update_fw
4038};
4039
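/*
 * Illustrative usage of the update_fw attribute (the host number and
 * firmware file name below are examples, not values defined by this
 * driver):
 *
 *	# cp ucode.bin /lib/firmware/
 *	# echo ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The name written is passed to request_firmware(), so the file must be
 * reachable through the kernel's firmware search path. The write blocks
 * until ipr_update_ioa_ucode() has downloaded the image and the adapter
 * reset has completed.
 */
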
Wayne Boyer75576bb2010-07-14 10:50:14 -07004040/**
4041 * ipr_show_fw_type - Show the adapter's firmware type.
4042 * @dev: class device struct
4043 * @buf: buffer
4044 *
4045 * Return value:
4046 * number of bytes printed to buffer
4047 **/
4048static ssize_t ipr_show_fw_type(struct device *dev,
4049 struct device_attribute *attr, char *buf)
4050{
4051 struct Scsi_Host *shost = class_to_shost(dev);
4052 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4053 unsigned long lock_flags = 0;
4054 int len;
4055
4056 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4057 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4058 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4059 return len;
4060}
4061
4062static struct device_attribute ipr_ioa_fw_type_attr = {
4063 .attr = {
4064 .name = "fw_type",
4065 .mode = S_IRUGO,
4066 },
4067 .show = ipr_show_fw_type
4068};
4069
Tony Jonesee959b02008-02-22 00:13:36 +01004070static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071 &ipr_fw_version_attr,
4072 &ipr_log_level_attr,
4073 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06004074 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075 &ipr_ioa_reset_attr,
4076 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004077 &ipr_ioa_fw_type_attr,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06004078 &ipr_iopoll_weight_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079 NULL,
4080};
4081
4082#ifdef CONFIG_SCSI_IPR_DUMP
4083/**
4084 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004085 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004087 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088 * @buf: buffer
4089 * @off: offset
4090 * @count: buffer size
4091 *
4092 * Return value:
4093 * number of bytes printed to buffer
4094 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004095static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004096 struct bin_attribute *bin_attr,
4097 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098{
Tony Jonesee959b02008-02-22 00:13:36 +01004099 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100 struct Scsi_Host *shost = class_to_shost(cdev);
4101 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4102 struct ipr_dump *dump;
4103 unsigned long lock_flags = 0;
4104 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004105 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106 size_t rc = count;
4107
4108 if (!capable(CAP_SYS_ADMIN))
4109 return -EACCES;
4110
4111 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4112 dump = ioa_cfg->dump;
4113
4114 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4115 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4116 return 0;
4117 }
4118 kref_get(&dump->kref);
4119 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4120
4121 if (off > dump->driver_dump.hdr.len) {
4122 kref_put(&dump->kref, ipr_release_dump);
4123 return 0;
4124 }
4125
4126 if (off + count > dump->driver_dump.hdr.len) {
4127 count = dump->driver_dump.hdr.len - off;
4128 rc = count;
4129 }
4130
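	/*
	 * The dump is presented to user space as three consecutive regions:
	 * the driver dump structures, the smart dump table (SDT) entries
	 * provided by the IOA, and finally the raw IOA data pages. 'off' is
	 * rebased into each region in turn as the copies below consume it.
	 */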
4131 if (count && off < sizeof(dump->driver_dump)) {
4132 if (off + count > sizeof(dump->driver_dump))
4133 len = sizeof(dump->driver_dump) - off;
4134 else
4135 len = count;
4136 src = (u8 *)&dump->driver_dump + off;
4137 memcpy(buf, src, len);
4138 buf += len;
4139 off += len;
4140 count -= len;
4141 }
4142
4143 off -= sizeof(dump->driver_dump);
4144
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004145 if (ioa_cfg->sis64)
4146 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4147 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4148 sizeof(struct ipr_sdt_entry));
4149 else
4150 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4151 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4152
4153 if (count && off < sdt_end) {
4154 if (off + count > sdt_end)
4155 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004156 else
4157 len = count;
4158 src = (u8 *)&dump->ioa_dump + off;
4159 memcpy(buf, src, len);
4160 buf += len;
4161 off += len;
4162 count -= len;
4163 }
4164
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004165 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166
4167 while (count) {
4168 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4169 len = PAGE_ALIGN(off) - off;
4170 else
4171 len = count;
4172 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4173 src += off & ~PAGE_MASK;
4174 memcpy(buf, src, len);
4175 buf += len;
4176 off += len;
4177 count -= len;
4178 }
4179
4180 kref_put(&dump->kref, ipr_release_dump);
4181 return rc;
4182}
4183
4184/**
4185 * ipr_alloc_dump - Prepare for adapter dump
4186 * @ioa_cfg: ioa config struct
4187 *
4188 * Return value:
4189 * 0 on success / other on failure
4190 **/
4191static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4192{
4193 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004194 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195 unsigned long lock_flags = 0;
4196
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004197 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198
4199 if (!dump) {
4200 ipr_err("Dump memory allocation failed\n");
4201 return -ENOMEM;
4202 }
4203
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004204 if (ioa_cfg->sis64)
4205 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4206 else
4207 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4208
4209 if (!ioa_data) {
4210 ipr_err("Dump memory allocation failed\n");
4211 kfree(dump);
4212 return -ENOMEM;
4213 }
4214
4215 dump->ioa_dump.ioa_data = ioa_data;
4216
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 kref_init(&dump->kref);
4218 dump->ioa_cfg = ioa_cfg;
4219
4220 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4221
4222 if (INACTIVE != ioa_cfg->sdt_state) {
4223 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004224 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225 kfree(dump);
4226 return 0;
4227 }
4228
4229 ioa_cfg->dump = dump;
4230 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004231 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232 ioa_cfg->dump_taken = 1;
4233 schedule_work(&ioa_cfg->work_q);
4234 }
4235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4236
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 return 0;
4238}
4239
4240/**
4241 * ipr_free_dump - Free adapter dump memory
4242 * @ioa_cfg: ioa config struct
4243 *
4244 * Return value:
4245 * 0 on success / other on failure
4246 **/
4247static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4248{
4249 struct ipr_dump *dump;
4250 unsigned long lock_flags = 0;
4251
4252 ENTER;
4253
4254 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4255 dump = ioa_cfg->dump;
4256 if (!dump) {
4257 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4258 return 0;
4259 }
4260
4261 ioa_cfg->dump = NULL;
4262 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4263
4264 kref_put(&dump->kref, ipr_release_dump);
4265
4266 LEAVE;
4267 return 0;
4268}
4269
4270/**
4271 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004272 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004274 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275 * @buf: buffer
4276 * @off: offset
4277 * @count: buffer size
4278 *
4279 * Return value:
 4280 * count on success / other on failure
4281 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004282static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004283 struct bin_attribute *bin_attr,
4284 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285{
Tony Jonesee959b02008-02-22 00:13:36 +01004286 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287 struct Scsi_Host *shost = class_to_shost(cdev);
4288 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4289 int rc;
4290
4291 if (!capable(CAP_SYS_ADMIN))
4292 return -EACCES;
4293
4294 if (buf[0] == '1')
4295 rc = ipr_alloc_dump(ioa_cfg);
4296 else if (buf[0] == '0')
4297 rc = ipr_free_dump(ioa_cfg);
4298 else
4299 return -EINVAL;
4300
4301 if (rc)
4302 return rc;
4303 else
4304 return count;
4305}
4306
4307static struct bin_attribute ipr_dump_attr = {
4308 .attr = {
4309 .name = "dump",
4310 .mode = S_IRUSR | S_IWUSR,
4311 },
4312 .size = 0,
4313 .read = ipr_read_dump,
4314 .write = ipr_write_dump
4315};
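
/*
 * Illustrative usage of the dump attribute (the sysfs path is an example
 * of a typical layout, not something defined by this driver):
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump	# allocate dump buffers
 *	# cat /sys/class/scsi_host/host0/dump > ipr.dump	# read the dump out
 *	# echo 0 > /sys/class/scsi_host/host0/dump	# free dump memory
 */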
4316#else
4317static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4318#endif
4319
4320/**
4321 * ipr_change_queue_depth - Change the device's queue depth
4322 * @sdev: scsi device struct
4323 * @qdepth: depth to set
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325 *
4326 * Return value:
4327 * actual depth set
4328 **/
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004329static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330{
Brian King35a39692006-09-25 12:39:20 -05004331 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4332 struct ipr_resource_entry *res;
4333 unsigned long lock_flags = 0;
4334
4335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4336 res = (struct ipr_resource_entry *)sdev->hostdata;
4337
4338 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4339 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4341
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004342 scsi_change_queue_depth(sdev, qdepth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343 return sdev->queue_depth;
4344}
4345
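/*
 * This callback is reached through the generic scsi_device queue_depth
 * sysfs attribute, e.g. (the device address is illustrative):
 *
 *	# echo 32 > /sys/bus/scsi/devices/2:0:1:0/queue_depth
 *
 * SATA (GATA) devices are capped at IPR_MAX_CMD_PER_ATA_LUN.
 */
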
4346/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4348 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004349 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 * @buf: buffer
4351 *
4352 * Return value:
4353 * number of bytes printed to buffer
4354 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004355static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356{
4357 struct scsi_device *sdev = to_scsi_device(dev);
4358 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4359 struct ipr_resource_entry *res;
4360 unsigned long lock_flags = 0;
4361 ssize_t len = -ENXIO;
4362
4363 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4364 res = (struct ipr_resource_entry *)sdev->hostdata;
4365 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004366 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4368 return len;
4369}
4370
4371static struct device_attribute ipr_adapter_handle_attr = {
4372 .attr = {
4373 .name = "adapter_handle",
4374 .mode = S_IRUSR,
4375 },
4376 .show = ipr_show_adapter_handle
4377};
4378
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004379/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004380 * ipr_show_resource_path - Show the resource path or the resource address for
4381 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004382 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004383 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004384 * @buf: buffer
4385 *
4386 * Return value:
4387 * number of bytes printed to buffer
4388 **/
4389static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4390{
4391 struct scsi_device *sdev = to_scsi_device(dev);
4392 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4393 struct ipr_resource_entry *res;
4394 unsigned long lock_flags = 0;
4395 ssize_t len = -ENXIO;
4396 char buffer[IPR_MAX_RES_PATH_LENGTH];
4397
4398 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4399 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004400 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004401 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004402 __ipr_format_res_path(res->res_path, buffer,
4403 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004404 else if (res)
4405 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4406 res->bus, res->target, res->lun);
4407
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4409 return len;
4410}
4411
4412static struct device_attribute ipr_resource_path_attr = {
4413 .attr = {
4414 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004415 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004416 },
4417 .show = ipr_show_resource_path
4418};
4419
Wayne Boyer75576bb2010-07-14 10:50:14 -07004420/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004421 * ipr_show_device_id - Show the device_id for this device.
4422 * @dev: device struct
4423 * @attr: device attribute structure
4424 * @buf: buffer
4425 *
4426 * Return value:
4427 * number of bytes printed to buffer
4428 **/
4429static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4430{
4431 struct scsi_device *sdev = to_scsi_device(dev);
4432 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4433 struct ipr_resource_entry *res;
4434 unsigned long lock_flags = 0;
4435 ssize_t len = -ENXIO;
4436
4437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4438 res = (struct ipr_resource_entry *)sdev->hostdata;
4439 if (res && ioa_cfg->sis64)
4440 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4441 else if (res)
4442 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4443
4444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4445 return len;
4446}
4447
4448static struct device_attribute ipr_device_id_attr = {
4449 .attr = {
4450 .name = "device_id",
4451 .mode = S_IRUGO,
4452 },
4453 .show = ipr_show_device_id
4454};
4455
4456/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004457 * ipr_show_resource_type - Show the resource type for this device.
4458 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004459 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004460 * @buf: buffer
4461 *
4462 * Return value:
4463 * number of bytes printed to buffer
4464 **/
4465static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4466{
4467 struct scsi_device *sdev = to_scsi_device(dev);
4468 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4469 struct ipr_resource_entry *res;
4470 unsigned long lock_flags = 0;
4471 ssize_t len = -ENXIO;
4472
4473 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4474 res = (struct ipr_resource_entry *)sdev->hostdata;
4475
4476 if (res)
4477 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4478
4479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4480 return len;
4481}
4482
4483static struct device_attribute ipr_resource_type_attr = {
4484 .attr = {
4485 .name = "resource_type",
4486 .mode = S_IRUGO,
4487 },
4488 .show = ipr_show_resource_type
4489};
4490
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491static struct device_attribute *ipr_dev_attrs[] = {
4492 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004493 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004494 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004495 &ipr_resource_type_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496 NULL,
4497};
4498
4499/**
4500 * ipr_biosparam - Return the HSC mapping
4501 * @sdev: scsi device struct
4502 * @block_device: block device pointer
4503 * @capacity: capacity of the device
4504 * @parm: Array containing returned HSC values.
4505 *
4506 * This function generates the HSC parms that fdisk uses.
4507 * We want to make sure we return something that places partitions
4508 * on 4k boundaries for best performance with the IOA.
4509 *
4510 * Return value:
4511 * 0 on success
4512 **/
4513static int ipr_biosparam(struct scsi_device *sdev,
4514 struct block_device *block_device,
4515 sector_t capacity, int *parm)
4516{
4517 int heads, sectors;
4518 sector_t cylinders;
4519
4520 heads = 128;
4521 sectors = 32;
4522
4523 cylinders = capacity;
4524 sector_div(cylinders, (128 * 32));
4525
4526 /* return result */
4527 parm[0] = heads;
4528 parm[1] = sectors;
4529 parm[2] = cylinders;
4530
4531 return 0;
4532}
4533
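/*
 * Worked example (capacity chosen for illustration only): with heads = 128
 * and sectors = 32, one cylinder is 128 * 32 = 4096 sectors, i.e. 2 MB with
 * 512-byte sectors, so cylinder-aligned partitions are always 4 KB aligned.
 * A disk of 143374744 sectors would report 143374744 / 4096 = 35003
 * cylinders (integer division via sector_div).
 */
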
4534/**
Brian King35a39692006-09-25 12:39:20 -05004535 * ipr_find_starget - Find target based on bus/target.
4536 * @starget: scsi target struct
4537 *
4538 * Return value:
4539 * resource entry pointer if found / NULL if not found
4540 **/
4541static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4542{
4543 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4544 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4545 struct ipr_resource_entry *res;
4546
4547 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004548 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004549 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004550 return res;
4551 }
4552 }
4553
4554 return NULL;
4555}
4556
4557static struct ata_port_info sata_port_info;
4558
4559/**
4560 * ipr_target_alloc - Prepare for commands to a SCSI target
4561 * @starget: scsi target struct
4562 *
4563 * If the device is a SATA device, this function allocates an
4564 * ATA port with libata, else it does nothing.
4565 *
4566 * Return value:
4567 * 0 on success / non-0 on failure
4568 **/
4569static int ipr_target_alloc(struct scsi_target *starget)
4570{
4571 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4572 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4573 struct ipr_sata_port *sata_port;
4574 struct ata_port *ap;
4575 struct ipr_resource_entry *res;
4576 unsigned long lock_flags;
4577
4578 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4579 res = ipr_find_starget(starget);
4580 starget->hostdata = NULL;
4581
4582 if (res && ipr_is_gata(res)) {
4583 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4584 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4585 if (!sata_port)
4586 return -ENOMEM;
4587
4588 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4589 if (ap) {
4590 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4591 sata_port->ioa_cfg = ioa_cfg;
4592 sata_port->ap = ap;
4593 sata_port->res = res;
4594
4595 res->sata_port = sata_port;
4596 ap->private_data = sata_port;
4597 starget->hostdata = sata_port;
4598 } else {
4599 kfree(sata_port);
4600 return -ENOMEM;
4601 }
4602 }
4603 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4604
4605 return 0;
4606}
4607
4608/**
4609 * ipr_target_destroy - Destroy a SCSI target
4610 * @starget: scsi target struct
4611 *
4612 * If the device was a SATA device, this function frees the libata
4613 * ATA port, else it does nothing.
4614 *
4615 **/
4616static void ipr_target_destroy(struct scsi_target *starget)
4617{
4618 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004619 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4620 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4621
4622 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004623 if (!ipr_find_starget(starget)) {
4624 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4625 clear_bit(starget->id, ioa_cfg->array_ids);
4626 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4627 clear_bit(starget->id, ioa_cfg->vset_ids);
4628 else if (starget->channel == 0)
4629 clear_bit(starget->id, ioa_cfg->target_ids);
4630 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004631 }
Brian King35a39692006-09-25 12:39:20 -05004632
4633 if (sata_port) {
4634 starget->hostdata = NULL;
4635 ata_sas_port_destroy(sata_port->ap);
4636 kfree(sata_port);
4637 }
4638}
4639
4640/**
4641 * ipr_find_sdev - Find device based on bus/target/lun.
4642 * @sdev: scsi device struct
4643 *
4644 * Return value:
4645 * resource entry pointer if found / NULL if not found
4646 **/
4647static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4648{
4649 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4650 struct ipr_resource_entry *res;
4651
4652 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004653 if ((res->bus == sdev->channel) &&
4654 (res->target == sdev->id) &&
4655 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004656 return res;
4657 }
4658
4659 return NULL;
4660}
4661
4662/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663 * ipr_slave_destroy - Unconfigure a SCSI device
4664 * @sdev: scsi device struct
4665 *
4666 * Return value:
4667 * nothing
4668 **/
4669static void ipr_slave_destroy(struct scsi_device *sdev)
4670{
4671 struct ipr_resource_entry *res;
4672 struct ipr_ioa_cfg *ioa_cfg;
4673 unsigned long lock_flags = 0;
4674
4675 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4676
4677 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4678 res = (struct ipr_resource_entry *) sdev->hostdata;
4679 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004680 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004681 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682 sdev->hostdata = NULL;
4683 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004684 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004685 }
4686 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4687}
4688
4689/**
4690 * ipr_slave_configure - Configure a SCSI device
4691 * @sdev: scsi device struct
4692 *
4693 * This function configures the specified scsi device.
4694 *
4695 * Return value:
4696 * 0 on success
4697 **/
4698static int ipr_slave_configure(struct scsi_device *sdev)
4699{
4700 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4701 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004702 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004704 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705
4706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4707 res = sdev->hostdata;
4708 if (res) {
4709 if (ipr_is_af_dasd_device(res))
4710 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004711 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004713 sdev->no_uld_attach = 1;
4714 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715 if (ipr_is_vset_device(res)) {
Brian King60654e22014-12-02 12:47:46 -06004716 sdev->scsi_level = SCSI_SPC_3;
Jens Axboe242f9dc2008-09-14 05:55:09 -07004717 blk_queue_rq_timeout(sdev->request_queue,
4718 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004719 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004720 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004721 if (ipr_is_gata(res) && res->sata_port)
4722 ap = res->sata_port->ap;
4723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4724
4725 if (ap) {
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004726 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004727 ata_sas_slave_configure(sdev, ap);
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01004728 }
4729
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004730 if (ioa_cfg->sis64)
4731 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004732 ipr_format_res_path(ioa_cfg,
4733 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004734 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004735 }
4736 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4737 return 0;
4738}
4739
4740/**
Brian King35a39692006-09-25 12:39:20 -05004741 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4742 * @sdev: scsi device struct
4743 *
4744 * This function initializes an ATA port so that future commands
4745 * sent through queuecommand will work.
4746 *
4747 * Return value:
4748 * 0 on success
4749 **/
4750static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4751{
4752 struct ipr_sata_port *sata_port = NULL;
4753 int rc = -ENXIO;
4754
4755 ENTER;
4756 if (sdev->sdev_target)
4757 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07004758 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05004759 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07004760 if (rc == 0)
4761 rc = ata_sas_sync_probe(sata_port->ap);
4762 }
4763
Brian King35a39692006-09-25 12:39:20 -05004764 if (rc)
4765 ipr_slave_destroy(sdev);
4766
4767 LEAVE;
4768 return rc;
4769}
4770
4771/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772 * ipr_slave_alloc - Prepare for commands to a device.
4773 * @sdev: scsi device struct
4774 *
4775 * This function saves a pointer to the resource entry
4776 * in the scsi device struct if the device exists. We
4777 * can then use this pointer in ipr_queuecommand when
4778 * handling new commands.
4779 *
4780 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004781 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782 **/
4783static int ipr_slave_alloc(struct scsi_device *sdev)
4784{
4785 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4786 struct ipr_resource_entry *res;
4787 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004788 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789
4790 sdev->hostdata = NULL;
4791
4792 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4793
Brian King35a39692006-09-25 12:39:20 -05004794 res = ipr_find_sdev(sdev);
4795 if (res) {
4796 res->sdev = sdev;
4797 res->add_to_ml = 0;
4798 res->in_erp = 0;
4799 sdev->hostdata = res;
4800 if (!ipr_is_naca_model(res))
4801 res->needs_sync_complete = 1;
4802 rc = 0;
4803 if (ipr_is_gata(res)) {
4804 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4805 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004806 }
4807 }
4808
4809 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4810
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004811 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004812}
4813
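/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd: scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/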
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004814static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004815{
4816 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004817 unsigned long lock_flags = 0;
4818 int rc = SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004819
4820 ENTER;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004821 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4822 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004823
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05004824 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004825 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02004826 dev_err(&ioa_cfg->pdev->dev,
4827 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004828
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02004829 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4830 ioa_cfg->sdt_state = GET_DUMP;
4831 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004832
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4834 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4835 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004836
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06004837	/* If we got hit with a host reset while we were already resetting the
 4838	   adapter and that reset failed, fail this host reset as well. */
4839 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4840 ipr_trace;
4841 rc = FAILED;
4842 }
4843
4844 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004845 LEAVE;
4846 return rc;
4847}
4848
4849/**
Brian Kingc6513092006-03-29 09:37:43 -06004850 * ipr_device_reset - Reset the device
4851 * @ioa_cfg: ioa config struct
4852 * @res: resource entry struct
4853 *
4854 * This function issues a device reset to the affected device.
4855 * If the device is a SCSI device, a LUN reset will be sent
4856 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05004857 * will be sent. If the device is a SATA device, a PHY reset will
4858 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06004859 *
4860 * Return value:
4861 * 0 on success / non-zero on failure
4862 **/
4863static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4864 struct ipr_resource_entry *res)
4865{
4866 struct ipr_cmnd *ipr_cmd;
4867 struct ipr_ioarcb *ioarcb;
4868 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05004869 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06004870 u32 ioasc;
4871
4872 ENTER;
4873 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4874 ioarcb = &ipr_cmd->ioarcb;
4875 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08004876
4877 if (ipr_cmd->ioa_cfg->sis64) {
4878 regs = &ipr_cmd->i.ata_ioadl.regs;
4879 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4880 } else
4881 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06004882
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004883 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06004884 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4885 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05004886 if (ipr_is_gata(res)) {
4887 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08004888 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05004889 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4890 }
Brian Kingc6513092006-03-29 09:37:43 -06004891
4892 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004893 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004894 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004895 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4896 if (ipr_cmd->ioa_cfg->sis64)
4897 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4898 sizeof(struct ipr_ioasa_gata));
4899 else
4900 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4901 sizeof(struct ipr_ioasa_gata));
4902 }
Brian Kingc6513092006-03-29 09:37:43 -06004903
4904 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004905 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06004906}
4907
4908/**
Brian King35a39692006-09-25 12:39:20 -05004909 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09004910 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05004911 * @classes: class of the attached device
4912 *
Tejun Heocc0680a2007-08-06 18:36:23 +09004913 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05004914 *
4915 * Return value:
4916 * 0 on success / non-zero on failure
4917 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09004918static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07004919 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05004920{
Tejun Heocc0680a2007-08-06 18:36:23 +09004921 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05004922 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4923 struct ipr_resource_entry *res;
4924 unsigned long lock_flags = 0;
4925 int rc = -ENXIO;
4926
4927 ENTER;
4928 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004929 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06004930 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4931 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4932 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4933 }
4934
Brian King35a39692006-09-25 12:39:20 -05004935 res = sata_port->res;
4936 if (res) {
4937 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004938 *classes = res->ata_class;
Brian King35a39692006-09-25 12:39:20 -05004939 }
4940
4941 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4942 LEAVE;
4943 return rc;
4944}
4945
4946/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947 * ipr_eh_dev_reset - Reset the device
4948 * @scsi_cmd: scsi command struct
4949 *
4950 * This function issues a device reset to the affected device.
4951 * A LUN reset will be sent to the device first. If that does
4952 * not work, a target reset will be sent.
4953 *
4954 * Return value:
4955 * SUCCESS / FAILED
4956 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004957static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004958{
4959 struct ipr_cmnd *ipr_cmd;
4960 struct ipr_ioa_cfg *ioa_cfg;
4961 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05004962 struct ata_port *ap;
4963 int rc = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004964 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004965
4966 ENTER;
4967 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4968 res = scsi_cmd->device->hostdata;
4969
brking@us.ibm.comeeb883072005-11-01 17:02:29 -06004970 if (!res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971 return FAILED;
4972
4973 /*
4974 * If we are currently going through reset/reload, return failed. This will force the
4975 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4976 * reset to complete
4977 */
4978 if (ioa_cfg->in_reset_reload)
4979 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004980 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004981 return FAILED;
4982
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004983 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004984 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004985 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4986 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4987 if (ipr_cmd->scsi_cmd)
4988 ipr_cmd->done = ipr_scsi_eh_done;
4989 if (ipr_cmd->qc)
4990 ipr_cmd->done = ipr_sata_eh_done;
4991 if (ipr_cmd->qc &&
4992 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4993 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4994 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4995 }
Brian King7402ece2006-11-21 10:28:23 -06004996 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004997 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004998 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005001 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05005002
5003 if (ipr_is_gata(res) && res->sata_port) {
5004 ap = res->sata_port->ap;
5005 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09005006 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05005007 spin_lock_irq(scsi_cmd->device->host->host_lock);
Brian King5af23d22007-05-09 15:36:35 -05005008
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005009 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005010 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005011 list_for_each_entry(ipr_cmd,
5012 &hrrq->hrrq_pending_q, queue) {
5013 if (ipr_cmd->ioarcb.res_handle ==
5014 res->res_handle) {
5015 rc = -EIO;
5016 break;
5017 }
Brian King5af23d22007-05-09 15:36:35 -05005018 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005019 spin_unlock(&hrrq->_lock);
Brian King5af23d22007-05-09 15:36:35 -05005020 }
Brian King35a39692006-09-25 12:39:20 -05005021 } else
5022 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06005024 res->reset_occurred = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005027 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028}
5029
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005030static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005031{
5032 int rc;
5033
5034 spin_lock_irq(cmd->device->host->host_lock);
5035 rc = __ipr_eh_dev_reset(cmd);
5036 spin_unlock_irq(cmd->device->host->host_lock);
5037
5038 return rc;
5039}
5040
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041/**
5042 * ipr_bus_reset_done - Op done function for bus reset.
5043 * @ipr_cmd: ipr command struct
5044 *
5045 * This function is the op done function for a bus reset
5046 *
5047 * Return value:
5048 * none
5049 **/
5050static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5051{
5052 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5053 struct ipr_resource_entry *res;
5054
5055 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005056 if (!ioa_cfg->sis64)
5057 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5058 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5059 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5060 break;
5061 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063
5064 /*
5065 * If abort has not completed, indicate the reset has, else call the
5066 * abort's done function to wake the sleeping eh thread
5067 */
5068 if (ipr_cmd->sibling->sibling)
5069 ipr_cmd->sibling->sibling = NULL;
5070 else
5071 ipr_cmd->sibling->done(ipr_cmd->sibling);
5072
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005073 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074 LEAVE;
5075}
5076
5077/**
5078 * ipr_abort_timeout - An abort task has timed out
5079 * @ipr_cmd: ipr command struct
5080 *
5081 * This function handles when an abort task times out. If this
5082 * happens we issue a bus reset since we have resources tied
5083 * up that must be freed before returning to the midlayer.
5084 *
5085 * Return value:
5086 * none
5087 **/
5088static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5089{
5090 struct ipr_cmnd *reset_cmd;
5091 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5092 struct ipr_cmd_pkt *cmd_pkt;
5093 unsigned long lock_flags = 0;
5094
5095 ENTER;
5096 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5097 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5098 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5099 return;
5100 }
5101
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005102 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5104 ipr_cmd->sibling = reset_cmd;
5105 reset_cmd->sibling = ipr_cmd;
5106 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5107 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5108 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5109 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5110 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5111
5112 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5113 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5114 LEAVE;
5115}
5116
5117/**
5118 * ipr_cancel_op - Cancel specified op
5119 * @scsi_cmd: scsi command struct
5120 *
5121 * This function cancels specified op.
5122 *
5123 * Return value:
5124 * SUCCESS / FAILED
5125 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005126static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127{
5128 struct ipr_cmnd *ipr_cmd;
5129 struct ipr_ioa_cfg *ioa_cfg;
5130 struct ipr_resource_entry *res;
5131 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005132 u32 ioasc, int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133 int op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005134 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135
5136 ENTER;
5137 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5138 res = scsi_cmd->device->hostdata;
5139
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005140 /* If we are currently going through reset/reload, return failed.
5141 * This will force the mid-layer to call ipr_eh_host_reset,
5142 * which will then go to sleep and wait for the reset to complete
5143 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005144 if (ioa_cfg->in_reset_reload ||
5145 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005146 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005147 if (!res)
5148 return FAILED;
5149
5150 /*
 5151	 * If we are aborting a timed-out op, chances are that the timeout was caused
 5152	 * by an EEH error that has not yet been detected. In such cases, reading a register will
5153 * trigger the EEH recovery infrastructure.
5154 */
5155 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5156
5157 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005158 return FAILED;
5159
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005160 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005161 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005162 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5163 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5164 ipr_cmd->done = ipr_scsi_eh_done;
5165 op_found = 1;
5166 break;
5167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005169 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005170 }
5171
5172 if (!op_found)
5173 return SUCCESS;
5174
5175 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005176 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5178 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5179 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5180 ipr_cmd->u.sdev = scsi_cmd->device;
5181
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005182 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5183 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005185 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186
5187 /*
5188 * If the abort task timed out and we sent a bus reset, we will get
 5189 * one of the following responses to the abort
5190 */
5191 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5192 ioasc = 0;
5193 ipr_trace;
5194 }
5195
Kleber Sacilotto de Souzac4ee22a2013-03-14 13:52:23 -05005196 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005197 if (!ipr_is_naca_model(res))
5198 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199
5200 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005201 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202}
5203
5204/**
 5205 * ipr_scan_finished - Check whether the adapter's device scan has finished
 5206 * @shost: scsi host struct
 * @elapsed_time: elapsed scan time in jiffies
5207 *
5208 * Return value:
Brian Kingf688f962014-12-02 12:47:37 -06005209 * 0 if scan in progress / 1 if scan is complete
5210 **/
5211static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5212{
5213 unsigned long lock_flags;
5214 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5215 int rc = 0;
5216
5217 spin_lock_irqsave(shost->host_lock, lock_flags);
5218 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5219 rc = 1;
5220 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5221 rc = 1;
5222 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5223 return rc;
5224}
5225
5226/**
 5227 * ipr_eh_abort - Abort a single op
5228 * @scsi_cmd: scsi command struct
5229 *
5230 * Return value:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231 * SUCCESS / FAILED
5232 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005233static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005235 unsigned long flags;
5236 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005237
5238 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005240 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5241 rc = ipr_cancel_op(scsi_cmd);
5242 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005243
5244 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005245 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246}
5247
5248/**
5249 * ipr_handle_other_interrupt - Handle "other" interrupts
5250 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005251 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252 *
5253 * Return value:
5254 * IRQ_NONE / IRQ_HANDLED
5255 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005256static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005257 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258{
5259 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005260 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005261
Wayne Boyer7dacb642011-04-12 10:29:02 -07005262 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5263 int_reg &= ~int_mask_reg;
5264
5265 /* If an interrupt on the adapter did not occur, ignore it.
5266 * Or in the case of SIS 64, check for a stage change interrupt.
5267 */
5268 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5269 if (ioa_cfg->sis64) {
5270 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5271 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5272 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5273
5274 /* clear stage change */
5275 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5276 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5277 list_del(&ioa_cfg->reset_cmd->queue);
5278 del_timer(&ioa_cfg->reset_cmd->timer);
5279 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5280 return IRQ_HANDLED;
5281 }
5282 }
5283
5284 return IRQ_NONE;
5285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286
5287 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5288 /* Mask the interrupt */
5289 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5290
5291 /* Clear the interrupt */
5292 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5293 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5294
5295 list_del(&ioa_cfg->reset_cmd->queue);
5296 del_timer(&ioa_cfg->reset_cmd->timer);
5297 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005298 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005299 if (ioa_cfg->clear_isr) {
5300 if (ipr_debug && printk_ratelimit())
5301 dev_err(&ioa_cfg->pdev->dev,
5302 "Spurious interrupt detected. 0x%08X\n", int_reg);
5303 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5304 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5305 return IRQ_NONE;
5306 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005307 } else {
5308 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5309 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005310 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5311 dev_err(&ioa_cfg->pdev->dev,
5312 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313 else
5314 dev_err(&ioa_cfg->pdev->dev,
5315 "Permanent IOA failure. 0x%08X\n", int_reg);
5316
5317 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5318 ioa_cfg->sdt_state = GET_DUMP;
5319
5320 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5321 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5322 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005323
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324 return rc;
5325}
5326
5327/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005328 * ipr_isr_eh - Interrupt service routine error handler
5329 * @ioa_cfg: ioa config struct
 5330 * @msg: message to log
 * @number: value logged along with the message (e.g. a response handle or retry count)
5331 *
5332 * Return value:
5333 * none
5334 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005335static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005336{
5337 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005338 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005339
5340 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5341 ioa_cfg->sdt_state = GET_DUMP;
5342
5343 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5344}
5345
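/*
 * ipr_process_hrrq drains one host request/response queue (HRRQ): a circular
 * buffer of response handles whose ownership is tracked by a toggle bit that
 * alternates on every pass through the ring. Entries are consumed while
 * their toggle bit matches hrr_queue->toggle_bit, and the matching commands
 * are moved to the caller's done queue. A budget <= 0 means no limit (the
 * hard-IRQ path passes -1); the iopoll path passes its polling budget.
 */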
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005346static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005347 struct list_head *doneq)
5348{
5349 u32 ioasc;
5350 u16 cmd_index;
5351 struct ipr_cmnd *ipr_cmd;
5352 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5353 int num_hrrq = 0;
5354
5355 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005356 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005357 return 0;
5358
5359 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5360 hrr_queue->toggle_bit) {
5361
5362 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5363 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5364 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5365
5366 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5367 cmd_index < hrr_queue->min_cmd_id)) {
5368 ipr_isr_eh(ioa_cfg,
5369 "Invalid response handle from IOA: ",
5370 cmd_index);
5371 break;
5372 }
5373
5374 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5375 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5376
5377 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5378
5379 list_move_tail(&ipr_cmd->queue, doneq);
5380
5381 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5382 hrr_queue->hrrq_curr++;
5383 } else {
5384 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5385 hrr_queue->toggle_bit ^= 1u;
5386 }
5387 num_hrrq++;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005388 if (budget > 0 && num_hrrq >= budget)
5389 break;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005390 }
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005391
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005392 return num_hrrq;
5393}
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005394
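/*
 * blk_iopoll callback for one HRRQ: pull up to 'budget' completions off the
 * queue under hrrq->lock, tell the iopoll core the queue is drained when
 * fewer than 'budget' were found, then run the collected commands' fast_done
 * handlers outside the lock.
 */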
5395static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5396{
5397 struct ipr_ioa_cfg *ioa_cfg;
5398 struct ipr_hrr_queue *hrrq;
5399 struct ipr_cmnd *ipr_cmd, *temp;
5400 unsigned long hrrq_flags;
5401 int completed_ops;
5402 LIST_HEAD(doneq);
5403
5404 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5405 ioa_cfg = hrrq->ioa_cfg;
5406
5407 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5408 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5409
5410 if (completed_ops < budget)
5411 blk_iopoll_complete(iop);
5412 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5413
5414 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5415 list_del(&ipr_cmd->queue);
5416 del_timer(&ipr_cmd->timer);
5417 ipr_cmd->fast_done(ipr_cmd);
5418 }
5419
5420 return completed_ops;
5421}
5422
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005423/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424 * ipr_isr - Interrupt service routine
5425 * @irq: irq number
5426 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005427 *
5428 * Return value:
5429 * IRQ_NONE / IRQ_HANDLED
5430 **/
David Howells7d12e782006-10-05 14:55:46 +01005431static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005432{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005433 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5434 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005435 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005436 u32 int_reg = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005437 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005438 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005439 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005441 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005443 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005444 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005445 if (!hrrq->allow_interrupts) {
5446 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447 return IRQ_NONE;
5448 }
5449
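	/*
	 * Loop until no more HRRQ entries are found: process completions,
	 * then (unless clear_isr is disabled) clear the HRRQ-updated
	 * interrupt, re-reading the sense register a bounded number of
	 * times in case the condition does not clear immediately.
	 */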
Linus Torvalds1da177e2005-04-16 15:20:36 -07005450 while (1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005451 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5452 rc = IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005453
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005454 if (!ioa_cfg->clear_isr)
5455 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456
Linus Torvalds1da177e2005-04-16 15:20:36 -07005457 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005458 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005459 do {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005460 writel(IPR_PCII_HRRQ_UPDATED,
5461 ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005462 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005463 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005464 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005465
Wayne Boyer7dacb642011-04-12 10:29:02 -07005466 } else if (rc == IRQ_NONE && irq_none == 0) {
5467 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5468 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005469 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5470 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005471 ipr_isr_eh(ioa_cfg,
5472 "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005473 rc = IRQ_HANDLED;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005474 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005475 } else
5476 break;
5477 }
5478
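	/*
	 * No HRRQ entries were found; hand the interrupt status read
	 * above to the other-interrupt handler.
	 */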
5479 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005480 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005481
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005482 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005483 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5484 list_del(&ipr_cmd->queue);
5485 del_timer(&ipr_cmd->timer);
5486 ipr_cmd->fast_done(ipr_cmd);
5487 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005488 return rc;
5489}
Brian King172cd6e2012-07-17 08:14:40 -05005490
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005491/**
5492 * ipr_isr_mhrrq - Interrupt service routine
5493 * @irq: irq number
 5494 * @devp:	pointer to hrr queue struct
5495 *
5496 * Return value:
5497 * IRQ_NONE / IRQ_HANDLED
5498 **/
5499static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5500{
5501 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005502 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005503 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005504 struct ipr_cmnd *ipr_cmd, *temp;
5505 irqreturn_t rc = IRQ_NONE;
5506 LIST_HEAD(doneq);
5507
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005508 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005509
5510 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005511 if (!hrrq->allow_interrupts) {
5512 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005513 return IRQ_NONE;
5514 }
5515
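	/*
	 * With iopoll enabled (SIS-64 adapters using more than one
	 * interrupt vector), defer completion processing to blk-iopoll;
	 * otherwise drain the queue right here in hard-irq context.
	 */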
Jens Axboe89f8b332014-03-13 09:38:42 -06005516 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005517 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5518 hrrq->toggle_bit) {
5519 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5520 blk_iopoll_sched(&hrrq->iopoll);
5521 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5522 return IRQ_HANDLED;
5523 }
5524 } else {
 5525		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 5526		    hrrq->toggle_bit)
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005528			if (ipr_process_hrrq(hrrq, -1, &doneq))
 5529				rc = IRQ_HANDLED;
5530 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005531
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005532 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005533
5534 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5535 list_del(&ipr_cmd->queue);
5536 del_timer(&ipr_cmd->timer);
5537 ipr_cmd->fast_done(ipr_cmd);
5538 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005539 return rc;
5540}
5541
5542/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005543 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005544 * @ioa_cfg: ioa config struct
5545 * @ipr_cmd: ipr command struct
5546 *
5547 * Return value:
5548 * 0 on success / -1 on failure
5549 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005550static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5551 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005553 int i, nseg;
5554 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005555 u32 length;
5556 u32 ioadl_flags = 0;
5557 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5558 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005559 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005560
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005561 length = scsi_bufflen(scsi_cmd);
5562 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005563 return 0;
5564
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005565 nseg = scsi_dma_map(scsi_cmd);
5566 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005567 if (printk_ratelimit())
Anton Blanchardd73341b2014-10-30 17:27:08 -05005568 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005569 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005570 }
5571
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005572 ipr_cmd->dma_use_sg = nseg;
5573
Wayne Boyer438b0332010-05-10 09:13:00 -07005574 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005575 ioarcb->ioadl_len =
5576 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005577
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005578 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5579 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5580 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005581 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5582 ioadl_flags = IPR_IOADL_FLAGS_READ;
5583
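	/*
	 * Turn each mapped scatter/gather element into a 64-bit IOADL
	 * descriptor; the final descriptor is flagged below so the
	 * adapter knows where the list ends.
	 */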
5584 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5585 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5586 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5587 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5588 }
5589
5590 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5591 return 0;
5592}
5593
5594/**
5595 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5596 * @ioa_cfg: ioa config struct
5597 * @ipr_cmd: ipr command struct
5598 *
5599 * Return value:
5600 * 0 on success / -1 on failure
5601 **/
5602static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5603 struct ipr_cmnd *ipr_cmd)
5604{
5605 int i, nseg;
5606 struct scatterlist *sg;
5607 u32 length;
5608 u32 ioadl_flags = 0;
5609 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5610 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5611 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5612
5613 length = scsi_bufflen(scsi_cmd);
5614 if (!length)
5615 return 0;
5616
5617 nseg = scsi_dma_map(scsi_cmd);
5618 if (nseg < 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05005619 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
Wayne Boyera32c0552010-02-19 13:23:36 -08005620 return -1;
5621 }
5622
5623 ipr_cmd->dma_use_sg = nseg;
5624
5625 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5626 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5627 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5628 ioarcb->data_transfer_length = cpu_to_be32(length);
5629 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005630 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5631 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5632 ioadl_flags = IPR_IOADL_FLAGS_READ;
5633 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5634 ioarcb->read_ioadl_len =
5635 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5636 }
5637
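	/*
	 * Short S/G lists fit in the add_data area of the IOARCB itself,
	 * so place the descriptors inline in the command block rather
	 * than in the external IOADL area.
	 */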
Wayne Boyera32c0552010-02-19 13:23:36 -08005638 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5639 ioadl = ioarcb->u.add_data.u.ioadl;
5640 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5641 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005642 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5643 }
5644
5645 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5646 ioadl[i].flags_and_data_len =
5647 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5648 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5649 }
5650
5651 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5652 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005653}
5654
5655/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005656 * ipr_erp_done - Process completion of ERP for a device
5657 * @ipr_cmd: ipr command struct
5658 *
5659 * This function copies the sense buffer into the scsi_cmd
 5660 * struct and calls the scsi_done function.
5661 *
5662 * Return value:
5663 * nothing
5664 **/
5665static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5666{
5667 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5668 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005669 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005670
5671 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5672 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005673 scmd_printk(KERN_ERR, scsi_cmd,
5674 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675 } else {
5676 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5677 SCSI_SENSE_BUFFERSIZE);
5678 }
5679
5680 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005681 if (!ipr_is_naca_model(res))
5682 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005683 res->in_erp = 0;
5684 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005685 scsi_dma_unmap(ipr_cmd->scsi_cmd);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005686 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005687 scsi_cmd->scsi_done(scsi_cmd);
5688}
5689
5690/**
5691 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5692 * @ipr_cmd: ipr command struct
5693 *
5694 * Return value:
5695 * none
5696 **/
5697static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5698{
Brian King51b1c7e2007-03-29 12:43:50 -05005699 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005700 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08005701 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005702
5703 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08005704 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005705 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005706 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005707 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005708 ioasa->hdr.ioasc = 0;
5709 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005710
5711 if (ipr_cmd->ioa_cfg->sis64)
5712 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5713 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5714 else {
5715 ioarcb->write_ioadl_addr =
5716 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5717 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5718 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005719}
5720
5721/**
5722 * ipr_erp_request_sense - Send request sense to a device
5723 * @ipr_cmd: ipr command struct
5724 *
5725 * This function sends a request sense to a device as a result
5726 * of a check condition.
5727 *
5728 * Return value:
5729 * nothing
5730 **/
5731static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5732{
5733 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005734 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005735
5736 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5737 ipr_erp_done(ipr_cmd);
5738 return;
5739 }
5740
5741 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5742
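	/*
	 * Build a REQUEST SENSE CDB by hand and point the data descriptor
	 * at the command block's private sense buffer.
	 */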
5743 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5744 cmd_pkt->cdb[0] = REQUEST_SENSE;
5745 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5746 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5747 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5748 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5749
Wayne Boyera32c0552010-02-19 13:23:36 -08005750 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5751 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005752
5753 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5754 IPR_REQUEST_SENSE_TIMEOUT * 2);
5755}
5756
5757/**
5758 * ipr_erp_cancel_all - Send cancel all to a device
5759 * @ipr_cmd: ipr command struct
5760 *
5761 * This function sends a cancel all to a device to clear the
5762 * queue. If we are running TCQ on the device, QERR is set to 1,
5763 * which means all outstanding ops have been dropped on the floor.
5764 * Cancel all will return them to us.
5765 *
5766 * Return value:
5767 * nothing
5768 **/
5769static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5770{
5771 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5772 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5773 struct ipr_cmd_pkt *cmd_pkt;
5774
5775 res->in_erp = 1;
5776
5777 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5778
Christoph Hellwig17ea0122014-11-24 15:36:20 +01005779 if (!scsi_cmd->device->simple_tags) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780 ipr_erp_request_sense(ipr_cmd);
5781 return;
5782 }
5783
5784 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5785 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5786 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5787
5788 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5789 IPR_CANCEL_ALL_TIMEOUT);
5790}
5791
5792/**
5793 * ipr_dump_ioasa - Dump contents of IOASA
5794 * @ioa_cfg: ioa config struct
5795 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06005796 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005797 *
5798 * This function is invoked by the interrupt handler when ops
5799 * fail. It will log the IOASA if appropriate. Only called
5800 * for GPDD ops.
5801 *
5802 * Return value:
5803 * none
5804 **/
5805static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06005806 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005807{
5808 int i;
5809 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05005810 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005811 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005812 __be32 *ioasa_data = (__be32 *)ioasa;
5813 int error_index;
5814
Wayne Boyer96d21f02010-05-10 09:13:27 -07005815 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5816 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005817
5818 if (0 == ioasc)
5819 return;
5820
5821 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5822 return;
5823
Brian Kingb0692dd2007-03-29 12:43:09 -05005824 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5825 error_index = ipr_get_error(fd_ioasc);
5826 else
5827 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005828
5829 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5830 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07005831 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005832 return;
5833
Brian Kingcc9bd5d2007-03-29 12:43:01 -05005834 if (!ipr_is_gscsi(res))
5835 return;
5836
Linus Torvalds1da177e2005-04-16 15:20:36 -07005837 if (ipr_error_table[error_index].log_ioasa == 0)
5838 return;
5839 }
5840
Brian Kingfe964d02006-03-29 09:37:29 -06005841 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005842
Wayne Boyer96d21f02010-05-10 09:13:27 -07005843 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5844 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5845 data_len = sizeof(struct ipr_ioasa64);
5846 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005847 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005848
5849 ipr_err("IOASA Dump:\n");
5850
5851 for (i = 0; i < data_len / 4; i += 4) {
5852 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5853 be32_to_cpu(ioasa_data[i]),
5854 be32_to_cpu(ioasa_data[i+1]),
5855 be32_to_cpu(ioasa_data[i+2]),
5856 be32_to_cpu(ioasa_data[i+3]));
5857 }
5858}
5859
5860/**
5861 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 5862 * @ipr_cmd:	ipr command struct
5864 *
5865 * Return value:
5866 * none
5867 **/
5868static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5869{
5870 u32 failing_lba;
5871 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5872 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005873 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5874 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005875
5876 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5877
5878 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5879 return;
5880
5881 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5882
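	/*
	 * Volume sets whose failing LBA does not fit in 32 bits get
	 * descriptor-format sense (0x72) carrying the full 64-bit LBA;
	 * everything else gets fixed-format sense (0x70).
	 */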
5883 if (ipr_is_vset_device(res) &&
5884 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5885 ioasa->u.vset.failing_lba_hi != 0) {
5886 sense_buf[0] = 0x72;
5887 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5888 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5889 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5890
5891 sense_buf[7] = 12;
5892 sense_buf[8] = 0;
5893 sense_buf[9] = 0x0A;
5894 sense_buf[10] = 0x80;
5895
5896 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5897
5898 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5899 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5900 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5901 sense_buf[15] = failing_lba & 0x000000ff;
5902
5903 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5904
5905 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5906 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5907 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5908 sense_buf[19] = failing_lba & 0x000000ff;
5909 } else {
5910 sense_buf[0] = 0x70;
5911 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5912 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5913 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5914
5915 /* Illegal request */
5916 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07005917 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005918 sense_buf[7] = 10; /* additional length */
5919
5920 /* IOARCB was in error */
5921 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5922 sense_buf[15] = 0xC0;
5923 else /* Parameter data was invalid */
5924 sense_buf[15] = 0x80;
5925
5926 sense_buf[16] =
5927 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005928 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005929 sense_buf[17] =
5930 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005931 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005932 } else {
5933 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5934 if (ipr_is_vset_device(res))
5935 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5936 else
5937 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5938
5939 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5940 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5941 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5942 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5943 sense_buf[6] = failing_lba & 0x000000ff;
5944 }
5945
5946 sense_buf[7] = 6; /* additional length */
5947 }
5948 }
5949}
5950
5951/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005952 * ipr_get_autosense - Copy autosense data to sense buffer
5953 * @ipr_cmd: ipr command struct
5954 *
5955 * This function copies the autosense buffer to the buffer
5956 * in the scsi_cmd, if there is autosense available.
5957 *
5958 * Return value:
5959 * 1 if autosense was available / 0 if not
5960 **/
5961static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5962{
Wayne Boyer96d21f02010-05-10 09:13:27 -07005963 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5964 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005965
Wayne Boyer96d21f02010-05-10 09:13:27 -07005966 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005967 return 0;
5968
Wayne Boyer96d21f02010-05-10 09:13:27 -07005969 if (ipr_cmd->ioa_cfg->sis64)
5970 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5971 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5972 SCSI_SENSE_BUFFERSIZE));
5973 else
5974 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5975 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5976 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005977 return 1;
5978}
5979
5980/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005981 * ipr_erp_start - Process an error response for a SCSI op
5982 * @ioa_cfg: ioa config struct
5983 * @ipr_cmd: ipr command struct
5984 *
5985 * This function determines whether or not to initiate ERP
5986 * on the affected device.
5987 *
5988 * Return value:
5989 * nothing
5990 **/
5991static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5992 struct ipr_cmnd *ipr_cmd)
5993{
5994 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5995 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005996 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05005997 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005998
5999 if (!res) {
6000 ipr_scsi_eh_done(ipr_cmd);
6001 return;
6002 }
6003
Brian King8a048992007-04-26 16:00:10 -05006004 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006005 ipr_gen_sense(ipr_cmd);
6006
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006007 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6008
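	/*
	 * Map the masked IOASC onto a mid-layer result and decide whether
	 * this op needs a sync complete, a retry, or full error recovery
	 * (cancel all + request sense) before it is returned.
	 */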
Brian King8a048992007-04-26 16:00:10 -05006009 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006010 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006011 if (ipr_is_naca_model(res))
6012 scsi_cmd->result |= (DID_ABORT << 16);
6013 else
6014 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006015 break;
6016 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006017 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006018 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6019 break;
6020 case IPR_IOASC_HW_SEL_TIMEOUT:
6021 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006022 if (!ipr_is_naca_model(res))
6023 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006024 break;
6025 case IPR_IOASC_SYNC_REQUIRED:
6026 if (!res->in_erp)
6027 res->needs_sync_complete = 1;
6028 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6029 break;
6030 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006031 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006032 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6033 break;
6034 case IPR_IOASC_BUS_WAS_RESET:
6035 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6036 /*
6037 * Report the bus reset and ask for a retry. The device
6038 * will give CC/UA the next command.
6039 */
6040 if (!res->resetting_device)
6041 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6042 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006043 if (!ipr_is_naca_model(res))
6044 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006045 break;
6046 case IPR_IOASC_HW_DEV_BUS_STATUS:
6047 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6048 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006049 if (!ipr_get_autosense(ipr_cmd)) {
6050 if (!ipr_is_naca_model(res)) {
6051 ipr_erp_cancel_all(ipr_cmd);
6052 return;
6053 }
6054 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006055 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006056 if (!ipr_is_naca_model(res))
6057 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006058 break;
6059 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6060 break;
6061 default:
Brian King5b7304f2006-08-02 14:57:51 -05006062 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6063 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006064 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006065 res->needs_sync_complete = 1;
6066 break;
6067 }
6068
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006069 scsi_dma_unmap(ipr_cmd->scsi_cmd);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006070 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006071 scsi_cmd->scsi_done(scsi_cmd);
6072}
6073
6074/**
6075 * ipr_scsi_done - mid-layer done function
6076 * @ipr_cmd: ipr command struct
6077 *
6078 * This function is invoked by the interrupt handler for
6079 * ops generated by the SCSI mid-layer
6080 *
6081 * Return value:
6082 * none
6083 **/
6084static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6085{
6086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6087 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006088 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006089 unsigned long hrrq_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006090
Wayne Boyer96d21f02010-05-10 09:13:27 -07006091 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006092
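	/*
	 * Fast path: a zero sense key means the op completed cleanly, so
	 * unmap and complete it directly; anything else goes through
	 * error recovery in ipr_erp_start().
	 */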
6093 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05006094 scsi_dma_unmap(scsi_cmd);
6095
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006096 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006097 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006098 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006099 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006100 } else {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006101 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006102 ipr_erp_start(ioa_cfg, ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006103 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006104 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006105}
6106
6107/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006108 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006109 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006110 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006111 *
6112 * This function queues a request generated by the mid-layer.
6113 *
6114 * Return value:
6115 * 0 on success
6116 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6117 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6118 **/
Brian King00bfef22012-07-17 08:13:52 -05006119static int ipr_queuecommand(struct Scsi_Host *shost,
6120 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006121{
6122 struct ipr_ioa_cfg *ioa_cfg;
6123 struct ipr_resource_entry *res;
6124 struct ipr_ioarcb *ioarcb;
6125 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006126 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006127 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006128 struct ipr_hrr_queue *hrrq;
6129 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006130
Brian King00bfef22012-07-17 08:13:52 -05006131 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6132
Linus Torvalds1da177e2005-04-16 15:20:36 -07006133 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006134 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006135
6136 if (ipr_is_gata(res) && res->sata_port) {
6137 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6138 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6139 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6140 return rc;
6141 }
6142
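	/*
	 * Pick a host RRQ for this command; each queue carries its own
	 * lock and its own free and pending lists, so the queues can be
	 * serviced independently.
	 */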
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006143 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6144 hrrq = &ioa_cfg->hrrq[hrrq_id];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006145
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006146 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006147 /*
6148 * We are currently blocking all devices due to a host reset
6149 * We have told the host to stop giving us new requests, but
6150 * ERP ops don't count. FIXME
6151 */
Brian Kingbfae7822013-01-30 23:45:08 -06006152 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006153 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006154 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006155 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006156
6157 /*
6158 * FIXME - Create scsi_set_host_offline interface
6159 * and the ioa_is_dead check can be removed
6160 */
Brian Kingbfae7822013-01-30 23:45:08 -06006161 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006162 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006163 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006164 }
6165
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006166 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6167 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006168 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006169 return SCSI_MLQUEUE_HOST_BUSY;
6170 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006171 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006172
Brian King172cd6e2012-07-17 08:14:40 -05006173 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006174 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006175
6176 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6177 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006178 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006179
6180 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6181 if (scsi_cmd->underflow == 0)
6182 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6183
Linus Torvalds1da177e2005-04-16 15:20:36 -07006184 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006185 if (ipr_is_gscsi(res) && res->reset_occurred) {
6186 res->reset_occurred = 0;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006187 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006188 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006189 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
Christoph Hellwig50668632014-10-30 14:30:06 +01006190 if (scsi_cmd->flags & SCMD_TAGGED)
6191 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6192 else
6193 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006194 }
6195
6196 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006197 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006199 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006200
Dan Carpenterd12f1572012-07-30 11:18:22 +03006201 if (ioa_cfg->sis64)
6202 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6203 else
6204 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006205
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006206 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6207 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006208 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006209 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006210 if (!rc)
6211 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006212 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006213 }
6214
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006215 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006216 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006217 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006218 scsi_dma_unmap(scsi_cmd);
6219 goto err_nodev;
6220 }
6221
6222 ioarcb->res_handle = res->res_handle;
6223 if (res->needs_sync_complete) {
6224 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6225 res->needs_sync_complete = 0;
6226 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006227 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006228 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006229 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006230 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006231 return 0;
6232
6233err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006234 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006235 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6236 scsi_cmd->result = (DID_NO_CONNECT << 16);
6237 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006238 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239 return 0;
6240}
6241
6242/**
Brian King35a39692006-09-25 12:39:20 -05006243 * ipr_ioctl - IOCTL handler
6244 * @sdev: scsi device struct
6245 * @cmd: IOCTL cmd
6246 * @arg: IOCTL arg
6247 *
6248 * Return value:
6249 * 0 on success / other on failure
6250 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06006251static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006252{
6253 struct ipr_resource_entry *res;
6254
6255 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006256 if (res && ipr_is_gata(res)) {
6257 if (cmd == HDIO_GET_IDENTITY)
6258 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006259 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006260 }
Brian King35a39692006-09-25 12:39:20 -05006261
6262 return -EINVAL;
6263}
6264
6265/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006266 * ipr_ioa_info - Get information about the card/driver
 6267 * @host:	scsi host struct
6268 *
6269 * Return value:
6270 * pointer to buffer with description string
6271 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006272static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006273{
6274 static char buffer[512];
6275 struct ipr_ioa_cfg *ioa_cfg;
6276 unsigned long lock_flags = 0;
6277
6278 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6279
6280 spin_lock_irqsave(host->host_lock, lock_flags);
6281 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6282 spin_unlock_irqrestore(host->host_lock, lock_flags);
6283
6284 return buffer;
6285}
6286
6287static struct scsi_host_template driver_template = {
6288 .module = THIS_MODULE,
6289 .name = "IPR",
6290 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006291 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006292 .queuecommand = ipr_queuecommand,
6293 .eh_abort_handler = ipr_eh_abort,
6294 .eh_device_reset_handler = ipr_eh_dev_reset,
6295 .eh_host_reset_handler = ipr_eh_host_reset,
6296 .slave_alloc = ipr_slave_alloc,
6297 .slave_configure = ipr_slave_configure,
6298 .slave_destroy = ipr_slave_destroy,
Brian Kingf688f962014-12-02 12:47:37 -06006299 .scan_finished = ipr_scan_finished,
Brian King35a39692006-09-25 12:39:20 -05006300 .target_alloc = ipr_target_alloc,
6301 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006302 .change_queue_depth = ipr_change_queue_depth,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006303 .bios_param = ipr_biosparam,
6304 .can_queue = IPR_MAX_COMMANDS,
6305 .this_id = -1,
6306 .sg_tablesize = IPR_MAX_SGLIST,
6307 .max_sectors = IPR_IOA_MAX_SECTORS,
6308 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6309 .use_clustering = ENABLE_CLUSTERING,
6310 .shost_attrs = ipr_ioa_attrs,
6311 .sdev_attrs = ipr_dev_attrs,
Martin K. Petersen54b2b502013-10-23 06:25:40 -04006312 .proc_name = IPR_NAME,
6313 .no_write_same = 1,
Christoph Hellwig2ecb2042014-11-03 14:09:02 +01006314 .use_blk_tags = 1,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006315};
6316
Brian King35a39692006-09-25 12:39:20 -05006317/**
6318 * ipr_ata_phy_reset - libata phy_reset handler
6319 * @ap: ata port to reset
6320 *
6321 **/
6322static void ipr_ata_phy_reset(struct ata_port *ap)
6323{
6324 unsigned long flags;
6325 struct ipr_sata_port *sata_port = ap->private_data;
6326 struct ipr_resource_entry *res = sata_port->res;
6327 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6328 int rc;
6329
6330 ENTER;
6331 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006332 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6334 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6335 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6336 }
6337
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006338 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006339 goto out_unlock;
6340
6341 rc = ipr_device_reset(ioa_cfg, res);
6342
6343 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006344 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006345 goto out_unlock;
6346 }
6347
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006348 ap->link.device[0].class = res->ata_class;
6349 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006350 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006351
6352out_unlock:
6353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6354 LEAVE;
6355}
6356
6357/**
6358 * ipr_ata_post_internal - Cleanup after an internal command
6359 * @qc: ATA queued command
6360 *
6361 * Return value:
6362 * none
6363 **/
6364static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6365{
6366 struct ipr_sata_port *sata_port = qc->ap->private_data;
6367 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6368 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006369 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006370 unsigned long flags;
6371
6372 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006373 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006374 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6375 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6376 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6377 }
6378
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006379 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006380 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006381 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6382 if (ipr_cmd->qc == qc) {
6383 ipr_device_reset(ioa_cfg, sata_port->res);
6384 break;
6385 }
Brian King35a39692006-09-25 12:39:20 -05006386 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006387 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006388 }
6389 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6390}
6391
6392/**
Brian King35a39692006-09-25 12:39:20 -05006393 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6394 * @regs: destination
6395 * @tf: source ATA taskfile
6396 *
6397 * Return value:
6398 * none
6399 **/
6400static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6401 struct ata_taskfile *tf)
6402{
6403 regs->feature = tf->feature;
6404 regs->nsect = tf->nsect;
6405 regs->lbal = tf->lbal;
6406 regs->lbam = tf->lbam;
6407 regs->lbah = tf->lbah;
6408 regs->device = tf->device;
6409 regs->command = tf->command;
6410 regs->hob_feature = tf->hob_feature;
6411 regs->hob_nsect = tf->hob_nsect;
6412 regs->hob_lbal = tf->hob_lbal;
6413 regs->hob_lbam = tf->hob_lbam;
6414 regs->hob_lbah = tf->hob_lbah;
6415 regs->ctl = tf->ctl;
6416}
6417
6418/**
6419 * ipr_sata_done - done function for SATA commands
6420 * @ipr_cmd: ipr command struct
6421 *
6422 * This function is invoked by the interrupt handler for
6423 * ops generated by the SCSI mid-layer to SATA devices
6424 *
6425 * Return value:
6426 * none
6427 **/
6428static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6429{
6430 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6431 struct ata_queued_cmd *qc = ipr_cmd->qc;
6432 struct ipr_sata_port *sata_port = qc->ap->private_data;
6433 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006434 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006435
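	/*
	 * Snapshot the ATA register image from the IOASA; ipr_qc_fill_rtf()
	 * copies it into the result taskfile when libata completes the qc.
	 */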
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006436 spin_lock(&ipr_cmd->hrrq->_lock);
Wayne Boyer96d21f02010-05-10 09:13:27 -07006437 if (ipr_cmd->ioa_cfg->sis64)
6438 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6439 sizeof(struct ipr_ioasa_gata));
6440 else
6441 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6442 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006443 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6444
Wayne Boyer96d21f02010-05-10 09:13:27 -07006445 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006446 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006447
6448 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006449 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006450 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006451 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006452 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006453 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006454 ata_qc_complete(qc);
6455}
6456
6457/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006458 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6459 * @ipr_cmd: ipr command struct
6460 * @qc: ATA queued command
6461 *
6462 **/
6463static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6464 struct ata_queued_cmd *qc)
6465{
6466 u32 ioadl_flags = 0;
6467 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006468 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
Wayne Boyera32c0552010-02-19 13:23:36 -08006469 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6470 int len = qc->nbytes;
6471 struct scatterlist *sg;
6472 unsigned int si;
6473 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6474
6475 if (len == 0)
6476 return;
6477
6478 if (qc->dma_dir == DMA_TO_DEVICE) {
6479 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6480 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6481 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6482 ioadl_flags = IPR_IOADL_FLAGS_READ;
6483
6484 ioarcb->data_transfer_length = cpu_to_be32(len);
6485 ioarcb->ioadl_len =
6486 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6487 ioarcb->u.sis64_addr_data.data_ioadl_addr =
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006488 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
Wayne Boyera32c0552010-02-19 13:23:36 -08006489
6490 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6491 ioadl64->flags = cpu_to_be32(ioadl_flags);
6492 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6493 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6494
6495 last_ioadl64 = ioadl64;
6496 ioadl64++;
6497 }
6498
6499 if (likely(last_ioadl64))
6500 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6501}
6502
6503/**
Brian King35a39692006-09-25 12:39:20 -05006504 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6505 * @ipr_cmd: ipr command struct
6506 * @qc: ATA queued command
6507 *
6508 **/
6509static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6510 struct ata_queued_cmd *qc)
6511{
6512 u32 ioadl_flags = 0;
6513 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006514 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006515 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006516 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006517 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006518 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006519
6520 if (len == 0)
6521 return;
6522
6523 if (qc->dma_dir == DMA_TO_DEVICE) {
6524 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6525 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006526 ioarcb->data_transfer_length = cpu_to_be32(len);
6527 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006528 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6529 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6530 ioadl_flags = IPR_IOADL_FLAGS_READ;
6531 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6532 ioarcb->read_ioadl_len =
6533 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6534 }
6535
Tejun Heoff2aeb12007-12-05 16:43:11 +09006536 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006537 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6538 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006539
6540 last_ioadl = ioadl;
6541 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006542 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006543
6544 if (likely(last_ioadl))
6545 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006546}
6547
6548/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006549 * ipr_qc_defer - Get a free ipr_cmd
6550 * @qc: queued command
6551 *
6552 * Return value:
 6553 * 	0 on success / ATA_DEFER_LINK if no command block is available
6554 **/
6555static int ipr_qc_defer(struct ata_queued_cmd *qc)
6556{
6557 struct ata_port *ap = qc->ap;
6558 struct ipr_sata_port *sata_port = ap->private_data;
6559 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6560 struct ipr_cmnd *ipr_cmd;
6561 struct ipr_hrr_queue *hrrq;
6562 int hrrq_id;
6563
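	/*
	 * Reserve a command block now and stash it in qc->lldd_task so
	 * ipr_qc_issue() does not have to allocate one; if none is free,
	 * ask libata to defer the command.
	 */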
6564 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6565 hrrq = &ioa_cfg->hrrq[hrrq_id];
6566
6567 qc->lldd_task = NULL;
6568 spin_lock(&hrrq->_lock);
6569 if (unlikely(hrrq->ioa_is_dead)) {
6570 spin_unlock(&hrrq->_lock);
6571 return 0;
6572 }
6573
6574 if (unlikely(!hrrq->allow_cmds)) {
6575 spin_unlock(&hrrq->_lock);
6576 return ATA_DEFER_LINK;
6577 }
6578
6579 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6580 if (ipr_cmd == NULL) {
6581 spin_unlock(&hrrq->_lock);
6582 return ATA_DEFER_LINK;
6583 }
6584
6585 qc->lldd_task = ipr_cmd;
6586 spin_unlock(&hrrq->_lock);
6587 return 0;
6588}
6589
6590/**
Brian King35a39692006-09-25 12:39:20 -05006591 * ipr_qc_issue - Issue a SATA qc to a device
6592 * @qc: queued command
6593 *
6594 * Return value:
 6595 * 	0 on success / AC_ERR_* error code on failure
6596 **/
6597static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6598{
6599 struct ata_port *ap = qc->ap;
6600 struct ipr_sata_port *sata_port = ap->private_data;
6601 struct ipr_resource_entry *res = sata_port->res;
6602 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6603 struct ipr_cmnd *ipr_cmd;
6604 struct ipr_ioarcb *ioarcb;
6605 struct ipr_ioarcb_ata_regs *regs;
6606
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006607 if (qc->lldd_task == NULL)
6608 ipr_qc_defer(qc);
6609
6610 ipr_cmd = qc->lldd_task;
6611 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05006612 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05006613
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006614 qc->lldd_task = NULL;
6615 spin_lock(&ipr_cmd->hrrq->_lock);
6616 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6617 ipr_cmd->hrrq->ioa_is_dead)) {
6618 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6619 spin_unlock(&ipr_cmd->hrrq->_lock);
6620 return AC_ERR_SYSTEM;
6621 }
6622
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006623 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05006624 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05006625
Wayne Boyera32c0552010-02-19 13:23:36 -08006626 if (ioa_cfg->sis64) {
6627 regs = &ipr_cmd->i.ata_ioadl.regs;
6628 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6629 } else
6630 regs = &ioarcb->u.add_data.u.regs;
6631
6632 memset(regs, 0, sizeof(*regs));
6633 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05006634
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006635 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05006636 ipr_cmd->qc = qc;
6637 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006638 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05006639 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6640 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6641 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01006642 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05006643
Wayne Boyera32c0552010-02-19 13:23:36 -08006644 if (ioa_cfg->sis64)
6645 ipr_build_ata_ioadl64(ipr_cmd, qc);
6646 else
6647 ipr_build_ata_ioadl(ipr_cmd, qc);
6648
Brian King35a39692006-09-25 12:39:20 -05006649 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6650 ipr_copy_sata_tf(regs, &qc->tf);
6651 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006652 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05006653
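	/*
	 * Translate the ATA taskfile protocol into the adapter's packet
	 * and DMA transfer flags; protocols the adapter cannot handle
	 * are rejected as invalid.
	 */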
6654 switch (qc->tf.protocol) {
6655 case ATA_PROT_NODATA:
6656 case ATA_PROT_PIO:
6657 break;
6658
6659 case ATA_PROT_DMA:
6660 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6661 break;
6662
Tejun Heo0dc36882007-12-18 16:34:43 -05006663 case ATAPI_PROT_PIO:
6664 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05006665 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6666 break;
6667
Tejun Heo0dc36882007-12-18 16:34:43 -05006668 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05006669 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6670 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6671 break;
6672
6673 default:
6674 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006675 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05006676 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05006677 }
6678
Wayne Boyera32c0552010-02-19 13:23:36 -08006679 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006680 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08006681
Brian King35a39692006-09-25 12:39:20 -05006682 return 0;
6683}
6684
6685/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006686 * ipr_qc_fill_rtf - Read result TF
6687 * @qc: ATA queued command
6688 *
6689 * Return value:
6690 * true
6691 **/
6692static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6693{
6694 struct ipr_sata_port *sata_port = qc->ap->private_data;
6695 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6696 struct ata_taskfile *tf = &qc->result_tf;
6697
6698 tf->feature = g->error;
6699 tf->nsect = g->nsect;
6700 tf->lbal = g->lbal;
6701 tf->lbam = g->lbam;
6702 tf->lbah = g->lbah;
6703 tf->device = g->device;
6704 tf->command = g->status;
6705 tf->hob_nsect = g->hob_nsect;
6706 tf->hob_lbal = g->hob_lbal;
6707 tf->hob_lbam = g->hob_lbam;
6708 tf->hob_lbah = g->hob_lbah;
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006709
6710 return true;
6711}
6712
Brian King35a39692006-09-25 12:39:20 -05006713static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05006714 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09006715 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05006716 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05006717 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006718 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05006719 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006720 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05006721 .port_start = ata_sas_port_start,
6722 .port_stop = ata_sas_port_stop
6723};
6724
6725static struct ata_port_info sata_port_info = {
Sergei Shtylyov9cbe0562011-02-04 22:05:48 +03006726 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03006727 .pio_mask = ATA_PIO4_ONLY,
6728 .mwdma_mask = ATA_MWDMA2,
6729 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05006730 .port_ops = &ipr_sata_ops
6731};
6732
Linus Torvalds1da177e2005-04-16 15:20:36 -07006733#ifdef CONFIG_PPC_PSERIES
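/*
 * PVR values of processor chips on which early Gemstone (5702) adapters are
 * known to be unreliable; checked by ipr_invalid_adapter() below.
 */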
6734static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00006735 PVR_NORTHSTAR,
6736 PVR_PULSAR,
6737 PVR_POWER4,
6738 PVR_ICESTAR,
6739 PVR_SSTAR,
6740 PVR_POWER4p,
6741 PVR_630,
6742 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07006743};
6744
6745/**
6746 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6747 * @ioa_cfg: ioa cfg struct
6748 *
6749 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6750 * certain pSeries hardware. This function determines if the given
 6751 * adapter is in one of these configurations or not.
6752 *
6753 * Return value:
6754 * 1 if adapter is not supported / 0 if adapter is supported
6755 **/
6756static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6757{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006758 int i;
6759
Auke Kok44c10132007-06-08 15:46:36 -07006760 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006761 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00006762 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07006763 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006764 }
6765 }
6766 return 0;
6767}
6768#else
6769#define ipr_invalid_adapter(ioa_cfg) 0
6770#endif
6771
6772/**
6773 * ipr_ioa_bringdown_done - IOA bring down completion.
6774 * @ipr_cmd: ipr command struct
6775 *
6776 * This function processes the completion of an adapter bring down.
6777 * It wakes any reset sleepers.
6778 *
6779 * Return value:
6780 * IPR_RC_JOB_RETURN
6781 **/
6782static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6783{
6784 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05006785 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006786
6787 ENTER;
Brian Kingbfae7822013-01-30 23:45:08 -06006788 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6789 ipr_trace;
6790 spin_unlock_irq(ioa_cfg->host->host_lock);
6791 scsi_unblock_requests(ioa_cfg->host);
6792 spin_lock_irq(ioa_cfg->host->host_lock);
6793 }
6794
Linus Torvalds1da177e2005-04-16 15:20:36 -07006795 ioa_cfg->in_reset_reload = 0;
6796 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05006797 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6798 spin_lock(&ioa_cfg->hrrq[i]._lock);
6799 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6800 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6801 }
6802 wmb();
6803
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006804 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006805 wake_up_all(&ioa_cfg->reset_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006806 LEAVE;
6807
6808 return IPR_RC_JOB_RETURN;
6809}
6810
6811/**
6812 * ipr_ioa_reset_done - IOA reset completion.
6813 * @ipr_cmd: ipr command struct
6814 *
6815 * This function processes the completion of an adapter reset.
6816 * It schedules any necessary mid-layer add/removes and
6817 * wakes any reset sleepers.
6818 *
6819 * Return value:
6820 * IPR_RC_JOB_RETURN
6821 **/
6822static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6823{
6824 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6825 struct ipr_resource_entry *res;
6826 struct ipr_hostrcb *hostrcb, *temp;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006827 int i = 0, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006828
6829 ENTER;
6830 ioa_cfg->in_reset_reload = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006831 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6832 spin_lock(&ioa_cfg->hrrq[j]._lock);
6833 ioa_cfg->hrrq[j].allow_cmds = 1;
6834 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6835 }
6836 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006837 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06006838 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006839
6840 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Brian Kingf688f962014-12-02 12:47:37 -06006841 if (res->add_to_ml || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006842 ipr_trace;
6843 break;
6844 }
6845 }
6846 schedule_work(&ioa_cfg->work_q);
6847
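	/*
	 * Re-register HCAM buffers with the adapter: the first
	 * IPR_NUM_LOG_HCAMS are used for error log data, the remainder for
	 * configuration change notifications.
	 */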
6848 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6849 list_del(&hostrcb->queue);
6850 if (i++ < IPR_NUM_LOG_HCAMS)
6851 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6852 else
6853 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6854 }
6855
Brian King6bb04172007-04-26 16:00:08 -05006856 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006857 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6858
6859 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006860 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006861 wake_up_all(&ioa_cfg->reset_wait_q);
6862
Mark Nelson30237852008-12-10 12:23:20 +11006863 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006864 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11006865 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006867 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006868 scsi_block_requests(ioa_cfg->host);
6869
Brian Kingf688f962014-12-02 12:47:37 -06006870 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006871 LEAVE;
6872 return IPR_RC_JOB_RETURN;
6873}
6874
6875/**
6876 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6877 * @supported_dev: supported device struct
6878 * @vpids: vendor product id struct
6879 *
6880 * Return value:
6881 * none
6882 **/
6883static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6884 struct ipr_std_inq_vpids *vpids)
6885{
6886 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6887 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6888 supported_dev->num_records = 1;
6889 supported_dev->data_length =
6890 cpu_to_be16(sizeof(struct ipr_supported_device));
6891 supported_dev->reserved = 0;
6892}
6893
6894/**
6895 * ipr_set_supported_devs - Send Set Supported Devices for a device
6896 * @ipr_cmd: ipr command struct
6897 *
Wayne Boyera32c0552010-02-19 13:23:36 -08006898 * This function sends a Set Supported Devices command to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07006899 *
6900 * Return value:
6901 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6902 **/
6903static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6904{
6905 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6906 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006907 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6908 struct ipr_resource_entry *res = ipr_cmd->u.res;
6909
6910 ipr_cmd->job_step = ipr_ioa_reset_done;
6911
6912 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06006913 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914 continue;
6915
6916 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006917 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006918
6919 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6920 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6921 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6922
6923 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006924 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006925 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6926 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6927
Wayne Boyera32c0552010-02-19 13:23:36 -08006928 ipr_init_ioadl(ipr_cmd,
6929 ioa_cfg->vpd_cbs_dma +
6930 offsetof(struct ipr_misc_cbs, supp_dev),
6931 sizeof(struct ipr_supported_device),
6932 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006933
6934 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6935 IPR_SET_SUP_DEVICE_TIMEOUT);
6936
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006937 if (!ioa_cfg->sis64)
6938 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006939 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006940 return IPR_RC_JOB_RETURN;
6941 }
6942
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006943 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006944 return IPR_RC_JOB_CONTINUE;
6945}
6946
6947/**
6948 * ipr_get_mode_page - Locate specified mode page
6949 * @mode_pages: mode page buffer
6950 * @page_code: page code to find
6951 * @len: minimum required length for mode page
6952 *
6953 * Return value:
6954 * pointer to mode page / NULL on failure
6955 **/
6956static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6957 u32 page_code, u32 len)
6958{
6959 struct ipr_mode_page_hdr *mode_hdr;
6960 u32 page_length;
6961 u32 length;
6962
6963 if (!mode_pages || (mode_pages->hdr.length == 0))
6964 return NULL;
6965
6966 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6967 mode_hdr = (struct ipr_mode_page_hdr *)
6968 (mode_pages->data + mode_pages->hdr.block_desc_len);
6969
6970 while (length) {
6971 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6972 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6973 return mode_hdr;
6974 break;
6975 } else {
6976 page_length = (sizeof(struct ipr_mode_page_hdr) +
6977 mode_hdr->page_length);
6978 length -= page_length;
6979 mode_hdr = (struct ipr_mode_page_hdr *)
6980 ((unsigned long)mode_hdr + page_length);
6981 }
6982 }
6983 return NULL;
6984}
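
/*
 * Illustrative usage sketch (not part of the driver; the local names are
 * only examples): a caller locates a specific mode page in a previously
 * fetched IOAFP mode sense buffer and should be prepared for a NULL return
 * when the page is missing or shorter than expected:
 *
 *	struct ipr_mode_page28 *page28;
 *
 *	page28 = ipr_get_mode_page(mode_pages, 0x28,
 *				   sizeof(struct ipr_mode_page28));
 *	if (!page28)
 *		return;
 *
 * The callers below (ipr_check_term_power() and
 * ipr_modify_ioafp_mode_page_28()) rely on page 28 always being present in
 * the IOAFP mode sense data and skip this check.
 */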
6985
6986/**
6987 * ipr_check_term_power - Check for term power errors
6988 * @ioa_cfg: ioa config struct
6989 * @mode_pages: IOAFP mode pages buffer
6990 *
6991 * Check the IOAFP's mode page 28 for term power errors
6992 *
6993 * Return value:
6994 * nothing
6995 **/
6996static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6997 struct ipr_mode_pages *mode_pages)
6998{
6999 int i;
7000 int entry_length;
7001 struct ipr_dev_bus_entry *bus;
7002 struct ipr_mode_page28 *mode_page;
7003
7004 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7005 sizeof(struct ipr_mode_page28));
7006
7007 entry_length = mode_page->entry_length;
7008
7009 bus = mode_page->bus;
7010
7011 for (i = 0; i < mode_page->num_entries; i++) {
7012 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7013 dev_err(&ioa_cfg->pdev->dev,
7014 "Term power is absent on scsi bus %d\n",
7015 bus->res_addr.bus);
7016 }
7017
7018 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7019 }
7020}
7021
7022/**
7023 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7024 * @ioa_cfg: ioa config struct
7025 *
7026 * Looks through the config table checking for SES devices. If
7027 * the SES device is in the SES table indicating a maximum SCSI
7028 * bus speed, the speed is limited for the bus.
7029 *
7030 * Return value:
7031 * none
7032 **/
7033static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7034{
7035 u32 max_xfer_rate;
7036 int i;
7037
7038 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7039 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7040 ioa_cfg->bus_attr[i].bus_width);
7041
7042 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7043 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7044 }
7045}
7046
7047/**
7048 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7049 * @ioa_cfg: ioa config struct
7050 * @mode_pages: mode page 28 buffer
7051 *
7052 * Updates mode page 28 based on driver configuration
7053 *
7054 * Return value:
7055 * none
7056 **/
7057static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007058 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007059{
7060 int i, entry_length;
7061 struct ipr_dev_bus_entry *bus;
7062 struct ipr_bus_attributes *bus_attr;
7063 struct ipr_mode_page28 *mode_page;
7064
7065 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7066 sizeof(struct ipr_mode_page28));
7067
7068 entry_length = mode_page->entry_length;
7069
7070 /* Loop for each device bus entry */
7071 for (i = 0, bus = mode_page->bus;
7072 i < mode_page->num_entries;
7073 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7074 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7075 dev_err(&ioa_cfg->pdev->dev,
7076 "Invalid resource address reported: 0x%08X\n",
7077 IPR_GET_PHYS_LOC(bus->res_addr));
7078 continue;
7079 }
7080
7081 bus_attr = &ioa_cfg->bus_attr[i];
7082 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7083 bus->bus_width = bus_attr->bus_width;
7084 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7085 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7086 if (bus_attr->qas_enabled)
7087 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7088 else
7089 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7090 }
7091}
7092
7093/**
7094 * ipr_build_mode_select - Build a mode select command
7095 * @ipr_cmd: ipr command struct
7096 * @res_handle: resource handle to send command to
 7097 * @parm: Byte 1 of Mode Select command
7098 * @dma_addr: DMA buffer address
7099 * @xfer_len: data transfer length
7100 *
7101 * Return value:
7102 * none
7103 **/
7104static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08007105 __be32 res_handle, u8 parm,
7106 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007107{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007108 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7109
7110 ioarcb->res_handle = res_handle;
7111 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7112 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7113 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7114 ioarcb->cmd_pkt.cdb[1] = parm;
7115 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7116
Wayne Boyera32c0552010-02-19 13:23:36 -08007117 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007118}
7119
7120/**
7121 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7122 * @ipr_cmd: ipr command struct
7123 *
7124 * This function sets up the SCSI bus attributes and sends
7125 * a Mode Select for Page 28 to activate them.
7126 *
7127 * Return value:
7128 * IPR_RC_JOB_RETURN
7129 **/
7130static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7131{
7132 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7133 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7134 int length;
7135
7136 ENTER;
Brian King47338042006-02-08 20:57:42 -06007137 ipr_scsi_bus_speed_limit(ioa_cfg);
7138 ipr_check_term_power(ioa_cfg, mode_pages);
7139 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
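	/*
	 * The mode data length field does not include itself, so the total
	 * amount to transfer is hdr.length + 1.  The field is reserved on
	 * MODE SELECT, so clear it before sending the buffer back.
	 */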
7140 length = mode_pages->hdr.length + 1;
7141 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007142
7143 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7144 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7145 length);
7146
Wayne Boyerf72919e2010-02-19 13:24:21 -08007147 ipr_cmd->job_step = ipr_set_supported_devs;
7148 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7149 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007150 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7151
7152 LEAVE;
7153 return IPR_RC_JOB_RETURN;
7154}
7155
7156/**
7157 * ipr_build_mode_sense - Builds a mode sense command
7158 * @ipr_cmd: ipr command struct
 7159 * @res_handle: resource handle to send command to
7160 * @parm: Byte 2 of mode sense command
7161 * @dma_addr: DMA address of mode sense buffer
7162 * @xfer_len: Size of DMA buffer
7163 *
7164 * Return value:
7165 * none
7166 **/
7167static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7168 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007169 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007171 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7172
7173 ioarcb->res_handle = res_handle;
7174 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7175 ioarcb->cmd_pkt.cdb[2] = parm;
7176 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7177 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7178
Wayne Boyera32c0552010-02-19 13:23:36 -08007179 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007180}
7181
7182/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007183 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7184 * @ipr_cmd: ipr command struct
7185 *
7186 * This function handles the failure of an IOA bringup command.
7187 *
7188 * Return value:
7189 * IPR_RC_JOB_RETURN
7190 **/
7191static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7192{
7193 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007194 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007195
7196 dev_err(&ioa_cfg->pdev->dev,
7197 "0x%02X failed with IOASC: 0x%08X\n",
7198 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7199
7200 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007201 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007202 return IPR_RC_JOB_RETURN;
7203}
7204
7205/**
7206 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7207 * @ipr_cmd: ipr command struct
7208 *
7209 * This function handles the failure of a Mode Sense to the IOAFP.
7210 * Some adapters do not handle all mode pages.
7211 *
7212 * Return value:
7213 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7214 **/
7215static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7216{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007217 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007218 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007219
7220 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007221 ipr_cmd->job_step = ipr_set_supported_devs;
7222 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7223 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007224 return IPR_RC_JOB_CONTINUE;
7225 }
7226
7227 return ipr_reset_cmd_failed(ipr_cmd);
7228}
7229
7230/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007231 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7232 * @ipr_cmd: ipr command struct
7233 *
 7234 * This function sends a Page 28 mode sense to the IOA to
7235 * retrieve SCSI bus attributes.
7236 *
7237 * Return value:
7238 * IPR_RC_JOB_RETURN
7239 **/
7240static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7241{
7242 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7243
7244 ENTER;
7245 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7246 0x28, ioa_cfg->vpd_cbs_dma +
7247 offsetof(struct ipr_misc_cbs, mode_pages),
7248 sizeof(struct ipr_mode_pages));
7249
7250 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007251 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007252
7253 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7254
7255 LEAVE;
7256 return IPR_RC_JOB_RETURN;
7257}
7258
7259/**
Brian Kingac09c342007-04-26 16:00:16 -05007260 * ipr_ioafp_mode_select_page24 - Issue Mode Select Page 24 to IOA
7261 * @ipr_cmd: ipr command struct
7262 *
7263 * This function enables dual IOA RAID support if possible.
7264 *
7265 * Return value:
7266 * IPR_RC_JOB_RETURN
7267 **/
7268static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7269{
7270 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7271 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7272 struct ipr_mode_page24 *mode_page;
7273 int length;
7274
7275 ENTER;
7276 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7277 sizeof(struct ipr_mode_page24));
7278
7279 if (mode_page)
7280 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7281
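	/* Same MODE SELECT length handling as in ipr_ioafp_mode_select_page28(). */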
7282 length = mode_pages->hdr.length + 1;
7283 mode_pages->hdr.length = 0;
7284
7285 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7286 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7287 length);
7288
7289 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7290 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7291
7292 LEAVE;
7293 return IPR_RC_JOB_RETURN;
7294}
7295
7296/**
7297 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7298 * @ipr_cmd: ipr command struct
7299 *
7300 * This function handles the failure of a Mode Sense to the IOAFP.
7301 * Some adapters do not handle all mode pages.
7302 *
7303 * Return value:
7304 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7305 **/
7306static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7307{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007308 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007309
7310 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7311 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7312 return IPR_RC_JOB_CONTINUE;
7313 }
7314
7315 return ipr_reset_cmd_failed(ipr_cmd);
7316}
7317
7318/**
7319 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7320 * @ipr_cmd: ipr command struct
7321 *
 7322 * This function sends a mode sense to the IOA to retrieve
7323 * the IOA Advanced Function Control mode page.
7324 *
7325 * Return value:
7326 * IPR_RC_JOB_RETURN
7327 **/
7328static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7329{
7330 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7331
7332 ENTER;
7333 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7334 0x24, ioa_cfg->vpd_cbs_dma +
7335 offsetof(struct ipr_misc_cbs, mode_pages),
7336 sizeof(struct ipr_mode_pages));
7337
7338 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7339 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7340
7341 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7342
7343 LEAVE;
7344 return IPR_RC_JOB_RETURN;
7345}
7346
7347/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007348 * ipr_init_res_table - Initialize the resource table
7349 * @ipr_cmd: ipr command struct
7350 *
7351 * This function looks through the existing resource table, comparing
7352 * it with the config table. This function will take care of old/new
7353 * devices and schedule adding/removing them from the mid-layer
7354 * as appropriate.
7355 *
7356 * Return value:
7357 * IPR_RC_JOB_CONTINUE
7358 **/
7359static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7360{
7361 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7362 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007363 struct ipr_config_table_entry_wrapper cfgtew;
7364 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007365 LIST_HEAD(old_res);
7366
7367 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007368 if (ioa_cfg->sis64)
7369 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7370 else
7371 flag = ioa_cfg->u.cfg_table->hdr.flags;
7372
7373 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007374 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7375
7376 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7377 list_move_tail(&res->queue, &old_res);
7378
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007379 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007380 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007381 else
7382 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7383
7384 for (i = 0; i < entries; i++) {
7385 if (ioa_cfg->sis64)
7386 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7387 else
7388 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007389 found = 0;
7390
7391 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007392 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007393 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7394 found = 1;
7395 break;
7396 }
7397 }
7398
7399 if (!found) {
7400 if (list_empty(&ioa_cfg->free_res_q)) {
7401 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7402 break;
7403 }
7404
7405 found = 1;
7406 res = list_entry(ioa_cfg->free_res_q.next,
7407 struct ipr_resource_entry, queue);
7408 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007409 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007410 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007411 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7412 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007413
7414 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007415 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007416 }
7417
7418 list_for_each_entry_safe(res, temp, &old_res, queue) {
7419 if (res->sdev) {
7420 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007421 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007422 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007423 }
7424 }
7425
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007426 list_for_each_entry_safe(res, temp, &old_res, queue) {
7427 ipr_clear_res_target(res);
7428 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7429 }
7430
Brian Kingac09c342007-04-26 16:00:16 -05007431 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7432 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7433 else
7434 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007435
7436 LEAVE;
7437 return IPR_RC_JOB_CONTINUE;
7438}
7439
7440/**
7441 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7442 * @ipr_cmd: ipr command struct
7443 *
7444 * This function sends a Query IOA Configuration command
7445 * to the adapter to retrieve the IOA configuration table.
7446 *
7447 * Return value:
7448 * IPR_RC_JOB_RETURN
7449 **/
7450static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7451{
7452 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7453 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007454 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007455 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007456
7457 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007458 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7459 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007460 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7461 ucode_vpd->major_release, ucode_vpd->card_type,
7462 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7463 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7464 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7465
7466 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07007467 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007468 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7469 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007470
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007471 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007472 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007473
7474 ipr_cmd->job_step = ipr_init_res_table;
7475
7476 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7477
7478 LEAVE;
7479 return IPR_RC_JOB_RETURN;
7480}
7481
7482/**
7483 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 7484 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page: inquiry page code
 * @dma_addr: DMA address of the inquiry response buffer
 * @xfer_len: size of the inquiry response buffer
7485 *
7486 * This utility function sends an inquiry to the adapter.
7487 *
7488 * Return value:
7489 * none
7490 **/
7491static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007492 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007493{
7494 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007495
7496 ENTER;
7497 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7498 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7499
7500 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7501 ioarcb->cmd_pkt.cdb[1] = flags;
7502 ioarcb->cmd_pkt.cdb[2] = page;
7503 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7504
Wayne Boyera32c0552010-02-19 13:23:36 -08007505 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007506
7507 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7508 LEAVE;
7509}
7510
7511/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06007512 * ipr_inquiry_page_supported - Is the given inquiry page supported
7513 * @page0: inquiry page 0 buffer
7514 * @page: page code.
7515 *
7516 * This function determines if the specified inquiry page is supported.
7517 *
7518 * Return value:
7519 * 1 if page is supported / 0 if not
7520 **/
7521static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7522{
7523 int i;
7524
7525 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7526 if (page0->page[i] == page)
7527 return 1;
7528
7529 return 0;
7530}
7531
7532/**
Brian Kingac09c342007-04-26 16:00:16 -05007533 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7534 * @ipr_cmd: ipr command struct
7535 *
7536 * This function sends a Page 0xD0 inquiry to the adapter
7537 * to retrieve adapter capabilities.
7538 *
7539 * Return value:
7540 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7541 **/
7542static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7543{
7544 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7545 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7546 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7547
7548 ENTER;
7549 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7550 memset(cap, 0, sizeof(*cap));
7551
7552 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7553 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7554 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7555 sizeof(struct ipr_inquiry_cap));
7556 return IPR_RC_JOB_RETURN;
7557 }
7558
7559 LEAVE;
7560 return IPR_RC_JOB_CONTINUE;
7561}
7562
7563/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007564 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7565 * @ipr_cmd: ipr command struct
7566 *
7567 * This function sends a Page 3 inquiry to the adapter
7568 * to retrieve software VPD information.
7569 *
7570 * Return value:
7571 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7572 **/
7573static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7574{
7575 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007576
7577 ENTER;
7578
Brian Kingac09c342007-04-26 16:00:16 -05007579 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007580
7581 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7582 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7583 sizeof(struct ipr_inquiry_page3));
7584
7585 LEAVE;
7586 return IPR_RC_JOB_RETURN;
7587}
7588
7589/**
7590 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7591 * @ipr_cmd: ipr command struct
7592 *
7593 * This function sends a Page 0 inquiry to the adapter
7594 * to retrieve supported inquiry pages.
7595 *
7596 * Return value:
7597 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7598 **/
7599static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7600{
7601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007602 char type[5];
7603
7604 ENTER;
7605
7606 /* Grab the type out of the VPD and store it away */
7607 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7608 type[4] = '\0';
7609 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7610
Brian Kingf688f962014-12-02 12:47:37 -06007611 if (ipr_invalid_adapter(ioa_cfg)) {
7612 dev_err(&ioa_cfg->pdev->dev,
7613 "Adapter not supported in this hardware configuration.\n");
7614
7615 if (!ipr_testmode) {
7616 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7617 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7618 list_add_tail(&ipr_cmd->queue,
7619 &ioa_cfg->hrrq->hrrq_free_q);
7620 return IPR_RC_JOB_RETURN;
7621 }
7622 }
7623
brking@us.ibm.com62275042005-11-01 17:01:14 -06007624 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007625
brking@us.ibm.com62275042005-11-01 17:01:14 -06007626 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7627 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7628 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007629
7630 LEAVE;
7631 return IPR_RC_JOB_RETURN;
7632}
7633
7634/**
7635 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7636 * @ipr_cmd: ipr command struct
7637 *
7638 * This function sends a standard inquiry to the adapter.
7639 *
7640 * Return value:
7641 * IPR_RC_JOB_RETURN
7642 **/
7643static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7644{
7645 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7646
7647 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007648 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007649
7650 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7651 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7652 sizeof(struct ipr_ioa_vpd));
7653
7654 LEAVE;
7655 return IPR_RC_JOB_RETURN;
7656}
7657
7658/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007659 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007660 * @ipr_cmd: ipr command struct
7661 *
 7662 * This function sends an Identify Host Request Response Queue
7663 * command to establish the HRRQ with the adapter.
7664 *
7665 * Return value:
7666 * IPR_RC_JOB_RETURN
7667 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08007668static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007669{
7670 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7671 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007672 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007673
7674 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007675 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007676 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7677
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007678 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7679 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007680
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007681 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7682 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007683
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007684 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7685 if (ioa_cfg->sis64)
7686 ioarcb->cmd_pkt.cdb[1] = 0x1;
7687
7688 if (ioa_cfg->nvectors == 1)
7689 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7690 else
7691 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7692
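		/*
		 * CDB bytes 2-5 carry the low 32 bits of the HRRQ DMA address,
		 * most significant byte first; bytes 7-8 carry the queue size
		 * in bytes.
		 */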
7693 ioarcb->cmd_pkt.cdb[2] =
7694 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7695 ioarcb->cmd_pkt.cdb[3] =
7696 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7697 ioarcb->cmd_pkt.cdb[4] =
7698 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7699 ioarcb->cmd_pkt.cdb[5] =
7700 ((u64) hrrq->host_rrq_dma) & 0xff;
7701 ioarcb->cmd_pkt.cdb[7] =
7702 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7703 ioarcb->cmd_pkt.cdb[8] =
7704 (sizeof(u32) * hrrq->size) & 0xff;
7705
7706 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007707 ioarcb->cmd_pkt.cdb[9] =
7708 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007709
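		/*
		 * SIS-64 adapters take the upper 32 bits of the HRRQ DMA
		 * address in CDB bytes 10-13, allowing a full 64-bit address.
		 */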
7710 if (ioa_cfg->sis64) {
7711 ioarcb->cmd_pkt.cdb[10] =
7712 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7713 ioarcb->cmd_pkt.cdb[11] =
7714 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7715 ioarcb->cmd_pkt.cdb[12] =
7716 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7717 ioarcb->cmd_pkt.cdb[13] =
7718 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7719 }
7720
7721 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007722 ioarcb->cmd_pkt.cdb[14] =
7723 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007724
7725 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7726 IPR_INTERNAL_TIMEOUT);
7727
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007728 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7729 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007730
7731 LEAVE;
7732 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08007733 }
7734
Linus Torvalds1da177e2005-04-16 15:20:36 -07007735 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007736 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007737}
7738
7739/**
7740 * ipr_reset_timer_done - Adapter reset timer function
7741 * @ipr_cmd: ipr command struct
7742 *
7743 * Description: This function is used in adapter reset processing
7744 * for timing events. If the reset_cmd pointer in the IOA
 7745 * config struct is not this adapter's, we are doing nested
7746 * resets and fail_all_ops will take care of freeing the
7747 * command block.
7748 *
7749 * Return value:
7750 * none
7751 **/
7752static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7753{
7754 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7755 unsigned long lock_flags = 0;
7756
7757 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7758
7759 if (ioa_cfg->reset_cmd == ipr_cmd) {
7760 list_del(&ipr_cmd->queue);
7761 ipr_cmd->done(ipr_cmd);
7762 }
7763
7764 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7765}
7766
7767/**
7768 * ipr_reset_start_timer - Start a timer for adapter reset job
7769 * @ipr_cmd: ipr command struct
7770 * @timeout: timeout value
7771 *
7772 * Description: This function is used in adapter reset processing
7773 * for timing events. If the reset_cmd pointer in the IOA
 7774 * config struct is not this adapter's, we are doing nested
7775 * resets and fail_all_ops will take care of freeing the
7776 * command block.
7777 *
7778 * Return value:
7779 * none
7780 **/
7781static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7782 unsigned long timeout)
7783{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007784
7785 ENTER;
7786 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007787 ipr_cmd->done = ipr_reset_ioa_job;
7788
7789 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7790 ipr_cmd->timer.expires = jiffies + timeout;
7791 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7792 add_timer(&ipr_cmd->timer);
7793}
7794
7795/**
7796 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7797 * @ioa_cfg: ioa cfg struct
7798 *
7799 * Return value:
7800 * nothing
7801 **/
7802static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7803{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007804 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007805
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007806 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007807 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007808 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7809
7810 /* Initialize Host RRQ pointers */
7811 hrrq->hrrq_start = hrrq->host_rrq;
7812 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7813 hrrq->hrrq_curr = hrrq->hrrq_start;
7814 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007815 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007816 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007817 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007818
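	/*
	 * HRRQ 0 is kept for adapter-internal commands, so when more than one
	 * queue is configured the round-robin index used for normal I/O
	 * starts at queue 1.
	 */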
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007819 ioa_cfg->identify_hrrq_index = 0;
7820 if (ioa_cfg->hrrq_num == 1)
7821 atomic_set(&ioa_cfg->hrrq_index, 0);
7822 else
7823 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007824
7825 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007826 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007827}
7828
7829/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007830 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7831 * @ipr_cmd: ipr command struct
7832 *
7833 * Return value:
7834 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7835 **/
7836static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7837{
7838 unsigned long stage, stage_time;
7839 u32 feedback;
7840 volatile u32 int_reg;
7841 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7842 u64 maskval = 0;
7843
7844 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7845 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7846 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7847
7848 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7849
7850 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07007851 if (stage_time == 0)
7852 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7853 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08007854 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7855 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7856 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7857
7858 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7859 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7860 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7861 stage_time = ioa_cfg->transop_timeout;
7862 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7863 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07007864 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7865 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7866 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7867 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7868 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7869 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7870 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7871 return IPR_RC_JOB_CONTINUE;
7872 }
Wayne Boyer214777b2010-02-19 13:24:26 -08007873 }
7874
7875 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7876 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7877 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7878 ipr_cmd->done = ipr_reset_ioa_job;
7879 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007880
7881 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08007882
7883 return IPR_RC_JOB_RETURN;
7884}
7885
7886/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007887 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7888 * @ipr_cmd: ipr command struct
7889 *
7890 * This function reinitializes some control blocks and
7891 * enables destructive diagnostics on the adapter.
7892 *
7893 * Return value:
7894 * IPR_RC_JOB_RETURN
7895 **/
7896static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7897{
7898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7899 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07007900 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007901 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007902
7903 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08007904 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007905 ipr_init_ioa_mem(ioa_cfg);
7906
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007907 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7908 spin_lock(&ioa_cfg->hrrq[i]._lock);
7909 ioa_cfg->hrrq[i].allow_interrupts = 1;
7910 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7911 }
7912 wmb();
Wayne Boyer8701f182010-06-04 10:26:50 -07007913 if (ioa_cfg->sis64) {
7914 /* Set the adapter to the correct endian mode. */
7915 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7916 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7917 }
7918
Wayne Boyer7be96902010-05-10 09:14:07 -07007919 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007920
7921 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7922 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08007923 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007924 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7925 return IPR_RC_JOB_CONTINUE;
7926 }
7927
7928 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08007929 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007930
Wayne Boyer7be96902010-05-10 09:14:07 -07007931 if (ioa_cfg->sis64) {
7932 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7933 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7934 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7935 } else
7936 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08007937
Linus Torvalds1da177e2005-04-16 15:20:36 -07007938 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7939
7940 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7941
Wayne Boyer214777b2010-02-19 13:24:26 -08007942 if (ioa_cfg->sis64) {
7943 ipr_cmd->job_step = ipr_reset_next_stage;
7944 return IPR_RC_JOB_CONTINUE;
7945 }
7946
Linus Torvalds1da177e2005-04-16 15:20:36 -07007947 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
Brian King5469cb52007-03-29 12:42:40 -05007948 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007949 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7950 ipr_cmd->done = ipr_reset_ioa_job;
7951 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007952 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953
7954 LEAVE;
7955 return IPR_RC_JOB_RETURN;
7956}
7957
7958/**
7959 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7960 * @ipr_cmd: ipr command struct
7961 *
7962 * This function is invoked when an adapter dump has run out
7963 * of processing time.
7964 *
7965 * Return value:
7966 * IPR_RC_JOB_CONTINUE
7967 **/
7968static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7969{
7970 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7971
7972 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05007973 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7974 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007975 ioa_cfg->sdt_state = ABORT_DUMP;
7976
Brian King4c647e92011-10-15 09:08:56 -05007977 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007978 ipr_cmd->job_step = ipr_reset_alert;
7979
7980 return IPR_RC_JOB_CONTINUE;
7981}
7982
7983/**
7984 * ipr_unit_check_no_data - Log a unit check/no data error log
7985 * @ioa_cfg: ioa config struct
7986 *
7987 * Logs an error indicating the adapter unit checked, but for some
7988 * reason, we were unable to fetch the unit check buffer.
7989 *
7990 * Return value:
7991 * nothing
7992 **/
7993static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7994{
7995 ioa_cfg->errors_logged++;
7996 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7997}
7998
7999/**
8000 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8001 * @ioa_cfg: ioa config struct
8002 *
8003 * Fetches the unit check buffer from the adapter by clocking the data
8004 * through the mailbox register.
8005 *
8006 * Return value:
8007 * nothing
8008 **/
8009static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8010{
8011 unsigned long mailbox;
8012 struct ipr_hostrcb *hostrcb;
8013 struct ipr_uc_sdt sdt;
8014 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05008015 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008016
8017 mailbox = readl(ioa_cfg->ioa_mailbox);
8018
Wayne Boyerdcbad002010-02-19 13:24:14 -08008019 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008020 ipr_unit_check_no_data(ioa_cfg);
8021 return;
8022 }
8023
8024 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8025 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8026 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8027
Wayne Boyerdcbad002010-02-19 13:24:14 -08008028 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8029 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8030 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008031 ipr_unit_check_no_data(ioa_cfg);
8032 return;
8033 }
8034
8035 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08008036 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8037 length = be32_to_cpu(sdt.entry[0].end_token);
8038 else
8039 length = (be32_to_cpu(sdt.entry[0].end_token) -
8040 be32_to_cpu(sdt.entry[0].start_token)) &
8041 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008042
8043 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8044 struct ipr_hostrcb, queue);
8045 list_del(&hostrcb->queue);
8046 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8047
8048 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08008049 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07008050 (__be32 *)&hostrcb->hcam,
8051 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8052
Brian King65f56472007-04-26 16:00:12 -05008053 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008054 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08008055 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05008056 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8057 ioa_cfg->sdt_state == GET_DUMP)
8058 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8059 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008060 ipr_unit_check_no_data(ioa_cfg);
8061
8062 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8063}
8064
8065/**
Wayne Boyer110def82010-11-04 09:36:16 -07008066 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8067 * @ipr_cmd: ipr command struct
8068 *
8069 * Description: This function will call to get the unit check buffer.
8070 *
8071 * Return value:
8072 * IPR_RC_JOB_RETURN
8073 **/
8074static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8075{
8076 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8077
8078 ENTER;
8079 ioa_cfg->ioa_unit_checked = 0;
8080 ipr_get_unit_check_buffer(ioa_cfg);
8081 ipr_cmd->job_step = ipr_reset_alert;
8082 ipr_reset_start_timer(ipr_cmd, 0);
8083
8084 LEAVE;
8085 return IPR_RC_JOB_RETURN;
8086}
8087
8088/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008089 * ipr_reset_restore_cfg_space - Restore PCI config space.
8090 * @ipr_cmd: ipr command struct
8091 *
8092 * Description: This function restores the saved PCI config space of
8093 * the adapter, fails all outstanding ops back to the callers, and
8094 * fetches the dump/unit check if applicable to this reset.
8095 *
8096 * Return value:
8097 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8098 **/
8099static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8100{
8101 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07008102 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008103
8104 ENTER;
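	/*
	 * pci_restore_state() restores nothing unless state_saved is set;
	 * the config space was saved earlier by the driver, so force the
	 * flag here before restoring.
	 */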
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02008105 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06008106 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008107
8108 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008109 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008110 return IPR_RC_JOB_CONTINUE;
8111 }
8112
8113 ipr_fail_all_ops(ioa_cfg);
8114
Wayne Boyer8701f182010-06-04 10:26:50 -07008115 if (ioa_cfg->sis64) {
8116 /* Set the adapter to the correct endian mode. */
8117 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8118 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8119 }
8120
Linus Torvalds1da177e2005-04-16 15:20:36 -07008121 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07008122 if (ioa_cfg->sis64) {
8123 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8124 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8125 return IPR_RC_JOB_RETURN;
8126 } else {
8127 ioa_cfg->ioa_unit_checked = 0;
8128 ipr_get_unit_check_buffer(ioa_cfg);
8129 ipr_cmd->job_step = ipr_reset_alert;
8130 ipr_reset_start_timer(ipr_cmd, 0);
8131 return IPR_RC_JOB_RETURN;
8132 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008133 }
8134
8135 if (ioa_cfg->in_ioa_bringdown) {
8136 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8137 } else {
8138 ipr_cmd->job_step = ipr_reset_enable_ioa;
8139
8140 if (GET_DUMP == ioa_cfg->sdt_state) {
Brian King41e9a692011-09-21 08:51:11 -05008141 ioa_cfg->sdt_state = READ_DUMP;
Brian King4c647e92011-10-15 09:08:56 -05008142 ioa_cfg->dump_timeout = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03008143 if (ioa_cfg->sis64)
8144 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8145 else
8146 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008147 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8148 schedule_work(&ioa_cfg->work_q);
8149 return IPR_RC_JOB_RETURN;
8150 }
8151 }
8152
Wayne Boyer438b0332010-05-10 09:13:00 -07008153 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008154 return IPR_RC_JOB_CONTINUE;
8155}
8156
8157/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008158 * ipr_reset_bist_done - BIST has completed on the adapter.
8159 * @ipr_cmd: ipr command struct
8160 *
8161 * Description: Unblock config space and resume the reset process.
8162 *
8163 * Return value:
8164 * IPR_RC_JOB_CONTINUE
8165 **/
8166static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8167{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008168 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8169
Brian Kinge619e1a2007-01-23 11:25:37 -06008170 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008171 if (ioa_cfg->cfg_locked)
8172 pci_cfg_access_unlock(ioa_cfg->pdev);
8173 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008174 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8175 LEAVE;
8176 return IPR_RC_JOB_CONTINUE;
8177}
8178
8179/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008180 * ipr_reset_start_bist - Run BIST on the adapter.
8181 * @ipr_cmd: ipr command struct
8182 *
8183 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8184 *
8185 * Return value:
8186 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8187 **/
8188static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8189{
8190 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008191 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008192
8193 ENTER;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008194 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8195 writel(IPR_UPROCI_SIS64_START_BIST,
8196 ioa_cfg->regs.set_uproc_interrupt_reg32);
8197 else
8198 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8199
8200 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008201 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008202 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8203 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008204 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008205 if (ioa_cfg->cfg_locked)
8206 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8207 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008208 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8209 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008210 }
8211
8212 LEAVE;
8213 return rc;
8214}
8215
8216/**
Brian King463fc692007-05-07 17:09:05 -05008217 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8218 * @ipr_cmd: ipr command struct
8219 *
8220 * Description: This clears PCI reset to the adapter and delays two seconds.
8221 *
8222 * Return value:
8223 * IPR_RC_JOB_RETURN
8224 **/
8225static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8226{
8227 ENTER;
8228 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8229 ipr_cmd->job_step = ipr_reset_bist_done;
8230 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8231 LEAVE;
8232 return IPR_RC_JOB_RETURN;
8233}
8234
8235/**
8236 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8237 * @ipr_cmd: ipr command struct
8238 *
8239 * Description: This asserts PCI reset to the adapter.
8240 *
8241 * Return value:
8242 * IPR_RC_JOB_RETURN
8243 **/
8244static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8245{
8246 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8247 struct pci_dev *pdev = ioa_cfg->pdev;
8248
8249 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008250 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8251 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8252 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8253 LEAVE;
8254 return IPR_RC_JOB_RETURN;
8255}
8256
8257/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008258 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8259 * @ipr_cmd: ipr command struct
8260 *
8261 * Description: This attempts to block config access to the IOA.
8262 *
8263 * Return value:
8264 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8265 **/
8266static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8267{
8268 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8269 int rc = IPR_RC_JOB_CONTINUE;
8270
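	/* Try to take exclusive PCI config access; if another user holds it,
	 * retry every IPR_CHECK_FOR_RESET_TIMEOUT until time_left runs out,
	 * then proceed with the reset anyway. */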
8271 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8272 ioa_cfg->cfg_locked = 1;
8273 ipr_cmd->job_step = ioa_cfg->reset;
8274 } else {
8275 if (ipr_cmd->u.time_left) {
8276 rc = IPR_RC_JOB_RETURN;
8277 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8278 ipr_reset_start_timer(ipr_cmd,
8279 IPR_CHECK_FOR_RESET_TIMEOUT);
8280 } else {
8281 ipr_cmd->job_step = ioa_cfg->reset;
8282 dev_err(&ioa_cfg->pdev->dev,
8283 "Timed out waiting to lock config access. Resetting anyway.\n");
8284 }
8285 }
8286
8287 return rc;
8288}
8289
8290/**
8291 * ipr_reset_block_config_access - Block config access to the IOA
8292 * @ipr_cmd: ipr command struct
8293 *
8294 * Description: This attempts to block config access to the IOA
8295 *
8296 * Return value:
8297 * IPR_RC_JOB_CONTINUE
8298 **/
8299static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8300{
8301 ipr_cmd->ioa_cfg->cfg_locked = 0;
8302 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8303 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8304 return IPR_RC_JOB_CONTINUE;
8305}
8306
8307/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008308 * ipr_reset_allowed - Query whether or not IOA can be reset
8309 * @ioa_cfg: ioa config struct
8310 *
8311 * Return value:
8312 * 0 if reset not allowed / non-zero if reset is allowed
8313 **/
8314static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8315{
8316 volatile u32 temp_reg;
8317
8318 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8319 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8320}
8321
8322/**
8323 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8324 * @ipr_cmd: ipr command struct
8325 *
8326 * Description: This function waits for adapter permission to run BIST,
8327 * then runs BIST. If the adapter does not give permission after a
8328 * reasonable time, we will reset the adapter anyway. The impact of
 8329 * reasonable time, we will reset the adapter anyway. The risk of
 8330 * resetting the adapter without warning it is the loss of the
 8331 * adapter's persistent error log: if the adapter is reset while it
 8332 * is writing to its flash, the affected flash segment will have bad
 8333 * ECC and be zeroed.
8334 * Return value:
8335 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8336 **/
8337static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8338{
8339 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8340 int rc = IPR_RC_JOB_RETURN;
8341
8342 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8343 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8344 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8345 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008346 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008347 rc = IPR_RC_JOB_CONTINUE;
8348 }
8349
8350 return rc;
8351}
8352
8353/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008354 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008355 * @ipr_cmd: ipr command struct
8356 *
8357 * Description: This function alerts the adapter that it will be reset.
8358 * If memory space is not currently enabled, proceed directly
8359 * to running BIST on the adapter. The timer must always be started
8360 * so we guarantee we do not run BIST from ipr_isr.
8361 *
8362 * Return value:
8363 * IPR_RC_JOB_RETURN
8364 **/
8365static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8366{
8367 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8368 u16 cmd_reg;
8369 int rc;
8370
8371 ENTER;
8372 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8373
8374 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8375 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008376 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008377 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8378 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008379 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008380 }
8381
8382 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8383 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8384
8385 LEAVE;
8386 return IPR_RC_JOB_RETURN;
8387}
8388
8389/**
8390 * ipr_reset_ucode_download_done - Microcode download completion
8391 * @ipr_cmd: ipr command struct
8392 *
8393 * Description: This function unmaps the microcode download buffer.
8394 *
8395 * Return value:
8396 * IPR_RC_JOB_CONTINUE
8397 **/
8398static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8399{
8400 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8401 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8402
Anton Blanchardd73341b2014-10-30 17:27:08 -05008403 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008404 sglist->num_sg, DMA_TO_DEVICE);
8405
8406 ipr_cmd->job_step = ipr_reset_alert;
8407 return IPR_RC_JOB_CONTINUE;
8408}
8409
8410/**
8411 * ipr_reset_ucode_download - Download microcode to the adapter
8412 * @ipr_cmd: ipr command struct
8413 *
 8414 * Description: This function checks to see if there is microcode
8415 * to download to the adapter. If there is, a download is performed.
8416 *
8417 * Return value:
8418 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8419 **/
8420static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8421{
8422 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8423 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8424
8425 ENTER;
8426 ipr_cmd->job_step = ipr_reset_alert;
8427
8428 if (!sglist)
8429 return IPR_RC_JOB_CONTINUE;
8430
8431 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8432 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8433 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8434 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
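	/* CDB bytes 6-8 carry the 24-bit microcode image length, MSB first,
	 * per the SCSI WRITE BUFFER command. */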
8435 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8436 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8437 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8438
Wayne Boyera32c0552010-02-19 13:23:36 -08008439 if (ioa_cfg->sis64)
8440 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8441 else
8442 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008443 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8444
8445 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8446 IPR_WRITE_BUFFER_TIMEOUT);
8447
8448 LEAVE;
8449 return IPR_RC_JOB_RETURN;
8450}
8451
8452/**
8453 * ipr_reset_shutdown_ioa - Shutdown the adapter
8454 * @ipr_cmd: ipr command struct
8455 *
8456 * Description: This function issues an adapter shutdown of the
8457 * specified type to the specified adapter as part of the
8458 * adapter reset job.
8459 *
8460 * Return value:
8461 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8462 **/
8463static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8464{
8465 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8466 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8467 unsigned long timeout;
8468 int rc = IPR_RC_JOB_CONTINUE;
8469
8470 ENTER;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008471 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8472 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008473 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8474 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8475 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8476 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8477
Brian Kingac09c342007-04-26 16:00:16 -05008478 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8479 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008480 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8481 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05008482 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8483 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008484 else
Brian Kingac09c342007-04-26 16:00:16 -05008485 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008486
8487 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8488
8489 rc = IPR_RC_JOB_RETURN;
8490 ipr_cmd->job_step = ipr_reset_ucode_download;
8491 } else
8492 ipr_cmd->job_step = ipr_reset_alert;
8493
8494 LEAVE;
8495 return rc;
8496}
8497
8498/**
8499 * ipr_reset_ioa_job - Adapter reset job
8500 * @ipr_cmd: ipr command struct
8501 *
8502 * Description: This function is the job router for the adapter reset job.
8503 *
8504 * Return value:
8505 * none
8506 **/
8507static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8508{
8509 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008510 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8511
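	/* Run job steps back to back while they return IPR_RC_JOB_CONTINUE;
	 * a step that returns IPR_RC_JOB_RETURN has armed a timer or issued
	 * a command and will re-enter this routine when it completes. */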
8512 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008513 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008514
8515 if (ioa_cfg->reset_cmd != ipr_cmd) {
8516 /*
8517 * We are doing nested adapter resets and this is
8518 * not the current reset job.
8519 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008520 list_add_tail(&ipr_cmd->queue,
8521 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008522 return;
8523 }
8524
8525 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06008526 rc = ipr_cmd->job_step_failed(ipr_cmd);
8527 if (rc == IPR_RC_JOB_RETURN)
8528 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008529 }
8530
8531 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06008532 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008533 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008534 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008535}
8536
8537/**
8538 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8539 * @ioa_cfg: ioa config struct
8540 * @job_step: first job step of reset job
8541 * @shutdown_type: shutdown type
8542 *
8543 * Description: This function will initiate the reset of the given adapter
8544 * starting at the selected job step.
8545 * If the caller needs to wait on the completion of the reset,
8546 * the caller must sleep on the reset_wait_q.
8547 *
8548 * Return value:
8549 * none
8550 **/
8551static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8552 int (*job_step) (struct ipr_cmnd *),
8553 enum ipr_shutdown_type shutdown_type)
8554{
8555 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008556 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008557
8558 ioa_cfg->in_reset_reload = 1;
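	/* Stop each HRRQ from accepting new commands, then publish the change
	 * with a write barrier before blocking the midlayer and starting the
	 * reset job. */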
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008559 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8560 spin_lock(&ioa_cfg->hrrq[i]._lock);
8561 ioa_cfg->hrrq[i].allow_cmds = 0;
8562 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8563 }
8564 wmb();
Brian Kingbfae7822013-01-30 23:45:08 -06008565 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8566 scsi_block_requests(ioa_cfg->host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008567
8568 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8569 ioa_cfg->reset_cmd = ipr_cmd;
8570 ipr_cmd->job_step = job_step;
8571 ipr_cmd->u.shutdown_type = shutdown_type;
8572
8573 ipr_reset_ioa_job(ipr_cmd);
8574}
8575
8576/**
8577 * ipr_initiate_ioa_reset - Initiate an adapter reset
8578 * @ioa_cfg: ioa config struct
8579 * @shutdown_type: shutdown type
8580 *
8581 * Description: This function will initiate the reset of the given adapter.
8582 * If the caller needs to wait on the completion of the reset,
8583 * the caller must sleep on the reset_wait_q.
8584 *
8585 * Return value:
8586 * none
8587 **/
8588static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8589 enum ipr_shutdown_type shutdown_type)
8590{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008591 int i;
8592
8593 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008594 return;
8595
Brian King41e9a692011-09-21 08:51:11 -05008596 if (ioa_cfg->in_reset_reload) {
8597 if (ioa_cfg->sdt_state == GET_DUMP)
8598 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8599 else if (ioa_cfg->sdt_state == READ_DUMP)
8600 ioa_cfg->sdt_state = ABORT_DUMP;
8601 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008602
8603 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8604 dev_err(&ioa_cfg->pdev->dev,
8605 "IOA taken offline - error recovery failed\n");
8606
8607 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008608 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8609 spin_lock(&ioa_cfg->hrrq[i]._lock);
8610 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8611 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8612 }
8613 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008614
8615 if (ioa_cfg->in_ioa_bringdown) {
8616 ioa_cfg->reset_cmd = NULL;
8617 ioa_cfg->in_reset_reload = 0;
8618 ipr_fail_all_ops(ioa_cfg);
8619 wake_up_all(&ioa_cfg->reset_wait_q);
8620
Brian Kingbfae7822013-01-30 23:45:08 -06008621 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8622 spin_unlock_irq(ioa_cfg->host->host_lock);
8623 scsi_unblock_requests(ioa_cfg->host);
8624 spin_lock_irq(ioa_cfg->host->host_lock);
8625 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008626 return;
8627 } else {
8628 ioa_cfg->in_ioa_bringdown = 1;
8629 shutdown_type = IPR_SHUTDOWN_NONE;
8630 }
8631 }
8632
8633 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8634 shutdown_type);
8635}
8636
8637/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008638 * ipr_reset_freeze - Hold off all I/O activity
8639 * @ipr_cmd: ipr command struct
8640 *
8641 * Description: If the PCI slot is frozen, hold off all I/O
8642 * activity; then, as soon as the slot is available again,
8643 * initiate an adapter reset.
8644 */
8645static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8646{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008647 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8648 int i;
8649
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008650 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008651 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8652 spin_lock(&ioa_cfg->hrrq[i]._lock);
8653 ioa_cfg->hrrq[i].allow_interrupts = 0;
8654 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8655 }
8656 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008657 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008658 ipr_cmd->done = ipr_reset_ioa_job;
8659 return IPR_RC_JOB_RETURN;
8660}
8661
8662/**
Brian King6270e592014-01-21 12:16:41 -06008663 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8664 * @pdev: PCI device struct
8665 *
8666 * Description: This routine is called to tell us that the MMIO
8667 * access to the IOA has been restored
8668 */
8669static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8670{
8671 unsigned long flags = 0;
8672 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8673
8674 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8675 if (!ioa_cfg->probe_done)
8676 pci_save_state(pdev);
8677 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8678 return PCI_ERS_RESULT_NEED_RESET;
8679}
8680
8681/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008682 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8683 * @pdev: PCI device struct
8684 *
8685 * Description: This routine is called to tell us that the PCI bus
8686 * is down. Can't do anything here, except put the device driver
8687 * into a holding pattern, waiting for the PCI bus to come back.
8688 */
8689static void ipr_pci_frozen(struct pci_dev *pdev)
8690{
8691 unsigned long flags = 0;
8692 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8693
8694 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06008695 if (ioa_cfg->probe_done)
8696 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8698}
8699
8700/**
8701 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8702 * @pdev: PCI device struct
8703 *
8704 * Description: This routine is called by the pci error recovery
8705 * code after the PCI slot has been reset, just before we
8706 * should resume normal operations.
8707 */
8708static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8709{
8710 unsigned long flags = 0;
8711 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8712
8713 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06008714 if (ioa_cfg->probe_done) {
8715 if (ioa_cfg->needs_warm_reset)
8716 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8717 else
8718 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8719 IPR_SHUTDOWN_NONE);
8720 } else
8721 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8723 return PCI_ERS_RESULT_RECOVERED;
8724}
8725
8726/**
8727 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8728 * @pdev: PCI device struct
8729 *
8730 * Description: This routine is called when the PCI bus has
8731 * permanently failed.
8732 */
8733static void ipr_pci_perm_failure(struct pci_dev *pdev)
8734{
8735 unsigned long flags = 0;
8736 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008737 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008738
8739 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06008740 if (ioa_cfg->probe_done) {
8741 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8742 ioa_cfg->sdt_state = ABORT_DUMP;
8743 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8744 ioa_cfg->in_ioa_bringdown = 1;
8745 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8746 spin_lock(&ioa_cfg->hrrq[i]._lock);
8747 ioa_cfg->hrrq[i].allow_cmds = 0;
8748 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8749 }
8750 wmb();
8751 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8752 } else
8753 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008754 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8755}
8756
8757/**
8758 * ipr_pci_error_detected - Called when a PCI error is detected.
8759 * @pdev: PCI device struct
8760 * @state: PCI channel state
8761 *
8762 * Description: Called when a PCI error is detected.
8763 *
8764 * Return value:
 8765 * PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8766 */
8767static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8768 pci_channel_state_t state)
8769{
8770 switch (state) {
8771 case pci_channel_io_frozen:
8772 ipr_pci_frozen(pdev);
Brian King6270e592014-01-21 12:16:41 -06008773 return PCI_ERS_RESULT_CAN_RECOVER;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008774 case pci_channel_io_perm_failure:
8775 ipr_pci_perm_failure(pdev);
8776 return PCI_ERS_RESULT_DISCONNECT;
8778 default:
8779 break;
8780 }
8781 return PCI_ERS_RESULT_NEED_RESET;
8782}
8783
8784/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008785 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8786 * @ioa_cfg: ioa cfg struct
8787 *
 8788 * Description: This is the second phase of adapter initialization.
 8789 * This function takes care of initializing the adapter to the point
 8790 * where it can accept new commands.
 8791 *
 8792 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02008793 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07008794 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008795static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008796{
8797 int rc = 0;
8798 unsigned long host_lock_flags = 0;
8799
8800 ENTER;
8801 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8802 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
Brian King6270e592014-01-21 12:16:41 -06008803 ioa_cfg->probe_done = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008804 if (ioa_cfg->needs_hard_reset) {
8805 ioa_cfg->needs_hard_reset = 0;
8806 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8807 } else
8808 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8809 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008810 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008811
8812 LEAVE;
8813 return rc;
8814}
8815
8816/**
8817 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8818 * @ioa_cfg: ioa config struct
8819 *
8820 * Return value:
8821 * none
8822 **/
8823static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8824{
8825 int i;
8826
8827 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8828 if (ioa_cfg->ipr_cmnd_list[i])
Anton Blanchardd73341b2014-10-30 17:27:08 -05008829 dma_pool_free(ioa_cfg->ipr_cmd_pool,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008830 ioa_cfg->ipr_cmnd_list[i],
8831 ioa_cfg->ipr_cmnd_list_dma[i]);
8832
8833 ioa_cfg->ipr_cmnd_list[i] = NULL;
8834 }
8835
8836 if (ioa_cfg->ipr_cmd_pool)
Anton Blanchardd73341b2014-10-30 17:27:08 -05008837 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008838
Brian King89aad422012-03-14 21:20:10 -05008839 kfree(ioa_cfg->ipr_cmnd_list);
8840 kfree(ioa_cfg->ipr_cmnd_list_dma);
8841 ioa_cfg->ipr_cmnd_list = NULL;
8842 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008843 ioa_cfg->ipr_cmd_pool = NULL;
8844}
8845
8846/**
8847 * ipr_free_mem - Frees memory allocated for an adapter
8848 * @ioa_cfg: ioa cfg struct
8849 *
8850 * Return value:
8851 * nothing
8852 **/
8853static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8854{
8855 int i;
8856
8857 kfree(ioa_cfg->res_entries);
Anton Blanchardd73341b2014-10-30 17:27:08 -05008858 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8859 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008860 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008861
8862 for (i = 0; i < ioa_cfg->hrrq_num; i++)
Anton Blanchardd73341b2014-10-30 17:27:08 -05008863 dma_free_coherent(&ioa_cfg->pdev->dev,
8864 sizeof(u32) * ioa_cfg->hrrq[i].size,
8865 ioa_cfg->hrrq[i].host_rrq,
8866 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008867
Anton Blanchardd73341b2014-10-30 17:27:08 -05008868 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8869 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008870
8871 for (i = 0; i < IPR_NUM_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05008872 dma_free_coherent(&ioa_cfg->pdev->dev,
8873 sizeof(struct ipr_hostrcb),
8874 ioa_cfg->hostrcb[i],
8875 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008876 }
8877
8878 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008879 kfree(ioa_cfg->trace);
8880}
8881
8882/**
8883 * ipr_free_all_resources - Free all allocated resources for an adapter.
 8884 * @ioa_cfg: ioa config struct
8885 *
8886 * This function frees all allocated resources for the
8887 * specified adapter.
8888 *
8889 * Return value:
8890 * none
8891 **/
8892static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8893{
8894 struct pci_dev *pdev = ioa_cfg->pdev;
8895
8896 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008897 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8898 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8899 int i;
8900 for (i = 0; i < ioa_cfg->nvectors; i++)
8901 free_irq(ioa_cfg->vectors_info[i].vec,
8902 &ioa_cfg->hrrq[i]);
8903 } else
8904 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8905
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008906 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008907 pci_disable_msi(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008908 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8909 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008910 pci_disable_msix(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008911 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8912 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008913
Linus Torvalds1da177e2005-04-16 15:20:36 -07008914 iounmap(ioa_cfg->hdw_dma_regs);
8915 pci_release_regions(pdev);
8916 ipr_free_mem(ioa_cfg);
8917 scsi_host_put(ioa_cfg->host);
8918 pci_disable_device(pdev);
8919 LEAVE;
8920}
8921
8922/**
8923 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8924 * @ioa_cfg: ioa config struct
8925 *
8926 * Return value:
8927 * 0 on success / -ENOMEM on allocation failure
8928 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008929static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008930{
8931 struct ipr_cmnd *ipr_cmd;
8932 struct ipr_ioarcb *ioarcb;
8933 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008934 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008935
Anton Blanchardd73341b2014-10-30 17:27:08 -05008936 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008937 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008938
8939 if (!ioa_cfg->ipr_cmd_pool)
8940 return -ENOMEM;
8941
Brian King89aad422012-03-14 21:20:10 -05008942 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8943 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8944
8945 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8946 ipr_free_cmd_blks(ioa_cfg);
8947 return -ENOMEM;
8948 }
8949
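	/*
	 * Partition the command blocks among the HRRQs: with multiple queues,
	 * queue 0 is sized for internal (driver-generated) commands and the
	 * remaining blocks are split evenly across the other queues; any
	 * leftover blocks are folded into the last queue below.
	 */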
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008950 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8951 if (ioa_cfg->hrrq_num > 1) {
8952 if (i == 0) {
8953 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8954 ioa_cfg->hrrq[i].min_cmd_id = 0;
8955 ioa_cfg->hrrq[i].max_cmd_id =
8956 (entries_each_hrrq - 1);
8957 } else {
8958 entries_each_hrrq =
8959 IPR_NUM_BASE_CMD_BLKS/
8960 (ioa_cfg->hrrq_num - 1);
8961 ioa_cfg->hrrq[i].min_cmd_id =
8962 IPR_NUM_INTERNAL_CMD_BLKS +
8963 (i - 1) * entries_each_hrrq;
8964 ioa_cfg->hrrq[i].max_cmd_id =
8965 (IPR_NUM_INTERNAL_CMD_BLKS +
8966 i * entries_each_hrrq - 1);
8967 }
8968 } else {
8969 entries_each_hrrq = IPR_NUM_CMD_BLKS;
8970 ioa_cfg->hrrq[i].min_cmd_id = 0;
8971 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8972 }
8973 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8974 }
8975
8976 BUG_ON(ioa_cfg->hrrq_num == 0);
8977
8978 i = IPR_NUM_CMD_BLKS -
8979 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8980 if (i > 0) {
8981 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8982 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8983 }
8984
Linus Torvalds1da177e2005-04-16 15:20:36 -07008985 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05008986 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008987
8988 if (!ipr_cmd) {
8989 ipr_free_cmd_blks(ioa_cfg);
8990 return -ENOMEM;
8991 }
8992
8993 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8994 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8995 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8996
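		/* Record the block's DMA address and point its IOARCB at the
		 * IOADL and IOASA that live inside the same block; SIS-64 and
		 * SIS-32 chips use different IOARCB address fields. */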
8997 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08008998 ipr_cmd->dma_addr = dma_addr;
8999 if (ioa_cfg->sis64)
9000 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9001 else
9002 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9003
Linus Torvalds1da177e2005-04-16 15:20:36 -07009004 ioarcb->host_response_handle = cpu_to_be32(i << 2);
Wayne Boyera32c0552010-02-19 13:23:36 -08009005 if (ioa_cfg->sis64) {
9006 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9007 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9008 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009009 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08009010 } else {
9011 ioarcb->write_ioadl_addr =
9012 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9013 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9014 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009015 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08009016 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009017 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9018 ipr_cmd->cmd_index = i;
9019 ipr_cmd->ioa_cfg = ioa_cfg;
9020 ipr_cmd->sense_buffer_dma = dma_addr +
9021 offsetof(struct ipr_cmnd, sense_buffer);
9022
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009023 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9024 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9025 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9026 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9027 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009028 }
9029
9030 return 0;
9031}
9032
9033/**
9034 * ipr_alloc_mem - Allocate memory for an adapter
9035 * @ioa_cfg: ioa config struct
9036 *
9037 * Return value:
9038 * 0 on success / non-zero for error
9039 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009040static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009041{
9042 struct pci_dev *pdev = ioa_cfg->pdev;
9043 int i, rc = -ENOMEM;
9044
9045 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009046 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009047 ioa_cfg->max_devs_supported, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009048
9049 if (!ioa_cfg->res_entries)
9050 goto out;
9051
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009052 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009053 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009054 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9055 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009056
Anton Blanchardd73341b2014-10-30 17:27:08 -05009057 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9058 sizeof(struct ipr_misc_cbs),
9059 &ioa_cfg->vpd_cbs_dma,
9060 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009061
9062 if (!ioa_cfg->vpd_cbs)
9063 goto out_free_res_entries;
9064
9065 if (ipr_alloc_cmd_blks(ioa_cfg))
9066 goto out_free_vpd_cbs;
9067
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009068 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009069 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009070 sizeof(u32) * ioa_cfg->hrrq[i].size,
Anton Blanchardd73341b2014-10-30 17:27:08 -05009071 &ioa_cfg->hrrq[i].host_rrq_dma,
9072 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009073
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009074 if (!ioa_cfg->hrrq[i].host_rrq) {
 9075 while (--i >= 0)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009076 dma_free_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009077 sizeof(u32) * ioa_cfg->hrrq[i].size,
9078 ioa_cfg->hrrq[i].host_rrq,
9079 ioa_cfg->hrrq[i].host_rrq_dma);
9080 goto out_ipr_free_cmd_blocks;
9081 }
9082 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9083 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009084
Anton Blanchardd73341b2014-10-30 17:27:08 -05009085 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9086 ioa_cfg->cfg_table_size,
9087 &ioa_cfg->cfg_table_dma,
9088 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009089
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009090 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009091 goto out_free_host_rrq;
9092
9093 for (i = 0; i < IPR_NUM_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009094 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9095 sizeof(struct ipr_hostrcb),
9096 &ioa_cfg->hostrcb_dma[i],
9097 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009098
9099 if (!ioa_cfg->hostrcb[i])
9100 goto out_free_hostrcb_dma;
9101
9102 ioa_cfg->hostrcb[i]->hostrcb_dma =
9103 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06009104 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009105 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9106 }
9107
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009108 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07009109 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9110
9111 if (!ioa_cfg->trace)
9112 goto out_free_hostrcb_dma;
9113
Linus Torvalds1da177e2005-04-16 15:20:36 -07009114 rc = 0;
9115out:
9116 LEAVE;
9117 return rc;
9118
9119out_free_hostrcb_dma:
9120 while (i-- > 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009121 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9122 ioa_cfg->hostrcb[i],
9123 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009124 }
Anton Blanchardd73341b2014-10-30 17:27:08 -05009125 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9126 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009127out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009128 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009129 dma_free_coherent(&pdev->dev,
9130 sizeof(u32) * ioa_cfg->hrrq[i].size,
9131 ioa_cfg->hrrq[i].host_rrq,
9132 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009133 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009134out_ipr_free_cmd_blocks:
9135 ipr_free_cmd_blks(ioa_cfg);
9136out_free_vpd_cbs:
Anton Blanchardd73341b2014-10-30 17:27:08 -05009137 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9138 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009139out_free_res_entries:
9140 kfree(ioa_cfg->res_entries);
9141 goto out;
9142}
9143
9144/**
9145 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9146 * @ioa_cfg: ioa config struct
9147 *
9148 * Return value:
9149 * none
9150 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009151static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009152{
9153 int i;
9154
9155 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9156 ioa_cfg->bus_attr[i].bus = i;
9157 ioa_cfg->bus_attr[i].qas_enabled = 0;
9158 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9159 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9160 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9161 else
9162 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9163 }
9164}
9165
9166/**
Brian King6270e592014-01-21 12:16:41 -06009167 * ipr_init_regs - Initialize IOA registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07009168 * @ioa_cfg: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07009169 *
9170 * Return value:
Brian King6270e592014-01-21 12:16:41 -06009171 * none
Linus Torvalds1da177e2005-04-16 15:20:36 -07009172 **/
Brian King6270e592014-01-21 12:16:41 -06009173static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009174{
9175 const struct ipr_interrupt_offsets *p;
9176 struct ipr_interrupts *t;
9177 void __iomem *base;
9178
Linus Torvalds1da177e2005-04-16 15:20:36 -07009179 p = &ioa_cfg->chip_cfg->regs;
9180 t = &ioa_cfg->regs;
9181 base = ioa_cfg->hdw_dma_regs;
9182
9183 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9184 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009185 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009186 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009187 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009188 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009189 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009190 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009191 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009192 t->ioarrin_reg = base + p->ioarrin_reg;
9193 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009194 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009195 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009196 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009197 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009198 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009199
9200 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009201 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009202 t->dump_addr_reg = base + p->dump_addr_reg;
9203 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009204 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009205 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009206}
9207
9208/**
Brian King6270e592014-01-21 12:16:41 -06009209 * ipr_init_ioa_cfg - Initialize IOA config struct
9210 * @ioa_cfg: ioa config struct
9211 * @host: scsi host struct
9212 * @pdev: PCI dev struct
9213 *
9214 * Return value:
9215 * none
9216 **/
9217static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9218 struct Scsi_Host *host, struct pci_dev *pdev)
9219{
9220 int i;
9221
9222 ioa_cfg->host = host;
9223 ioa_cfg->pdev = pdev;
9224 ioa_cfg->log_level = ipr_log_level;
9225 ioa_cfg->doorbell = IPR_DOORBELL;
9226 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9227 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9228 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9229 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9230 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9231 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9232
9233 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9234 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9235 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9236 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9237 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9238 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9239 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9240 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9241 ioa_cfg->sdt_state = INACTIVE;
9242
9243 ipr_initialize_bus_attr(ioa_cfg);
9244 ioa_cfg->max_devs_supported = ipr_max_devs;
9245
9246 if (ioa_cfg->sis64) {
9247 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9248 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9249 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9250 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9251 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9252 + ((sizeof(struct ipr_config_table_entry64)
9253 * ioa_cfg->max_devs_supported)));
9254 } else {
9255 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9256 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9257 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9258 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9259 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9260 + ((sizeof(struct ipr_config_table_entry)
9261 * ioa_cfg->max_devs_supported)));
9262 }
9263
Brian Kingf688f962014-12-02 12:47:37 -06009264 host->max_channel = IPR_VSET_BUS;
Brian King6270e592014-01-21 12:16:41 -06009265 host->unique_id = host->host_no;
9266 host->max_cmd_len = IPR_MAX_CDB_LEN;
9267 host->can_queue = ioa_cfg->max_cmds;
9268 pci_set_drvdata(pdev, ioa_cfg);
9269
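	/* HRRQ 0 shares the SCSI host lock; any additional queues use their
	 * own locks. */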
9270 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9271 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9272 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9273 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9274 if (i == 0)
9275 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9276 else
9277 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9278 }
9279}
9280
9281/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009282 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009283 * @dev_id: PCI device id struct
9284 *
9285 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009286 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009287 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009288static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009289ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009290{
9291 int i;
9292
Linus Torvalds1da177e2005-04-16 15:20:36 -07009293 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9294 if (ipr_chip[i].vendor == dev_id->vendor &&
9295 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009296 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07009297 return NULL;
9298}
9299
Brian King6270e592014-01-21 12:16:41 -06009300/**
9301 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9302 * during probe time
9303 * @ioa_cfg: ioa config struct
9304 *
9305 * Return value:
9306 * None
9307 **/
9308static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9309{
9310 struct pci_dev *pdev = ioa_cfg->pdev;
9311
9312 if (pci_channel_offline(pdev)) {
9313 wait_event_timeout(ioa_cfg->eeh_wait_q,
9314 !pci_channel_offline(pdev),
9315 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9316 pci_restore_state(pdev);
9317 }
9318}
9319
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009320static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9321{
9322 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009323 int i, vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009324
9325 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9326 entries[i].entry = i;
9327
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009328 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9329 entries, 1, ipr_number_of_msix);
9330 if (vectors < 0) {
Brian King6270e592014-01-21 12:16:41 -06009331 ipr_wait_for_pci_err_recovery(ioa_cfg);
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009332 return vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009333 }
9334
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009335 for (i = 0; i < vectors; i++)
9336 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9337 ioa_cfg->nvectors = vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009338
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009339 return 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009340}
9341
9342static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9343{
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009344 int i, vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009345
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009346 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9347 if (vectors < 0) {
Brian King6270e592014-01-21 12:16:41 -06009348 ipr_wait_for_pci_err_recovery(ioa_cfg);
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009349 return vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009350 }
9351
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009352 for (i = 0; i < vectors; i++)
9353 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9354 ioa_cfg->nvectors = vectors;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009355
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009356 return 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009357}
9358
9359static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9360{
9361 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9362
9363 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9364 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9365 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9366 ioa_cfg->vectors_info[vec_idx].
9367 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9368 }
9369}
9370
9371static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9372{
9373 int i, rc;
9374
9375 for (i = 1; i < ioa_cfg->nvectors; i++) {
9376 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9377 ipr_isr_mhrrq,
9378 0,
9379 ioa_cfg->vectors_info[i].desc,
9380 &ioa_cfg->hrrq[i]);
9381 if (rc) {
9382 while (--i >= 0)
9383 free_irq(ioa_cfg->vectors_info[i].vec,
9384 &ioa_cfg->hrrq[i]);
9385 return rc;
9386 }
9387 }
9388 return 0;
9389}
9390
Linus Torvalds1da177e2005-04-16 15:20:36 -07009391/**
Wayne Boyer95fecd92009-06-16 15:13:28 -07009392 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 9393 * @irq: interrupt number
 * @devp: ioa config struct pointer (the dev_id passed to request_irq)
 9394 *
 9395 * Description: Simply set the msi_received flag to 1 indicating that
 9396 * Message Signaled Interrupts are supported.
 9397 *
 9398 * Return value:
 9399 * IRQ_HANDLED
9400 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009401static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -07009402{
9403 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9404 unsigned long lock_flags = 0;
9405 irqreturn_t rc = IRQ_HANDLED;
9406
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009407 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009408 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9409
9410 ioa_cfg->msi_received = 1;
9411 wake_up(&ioa_cfg->msi_wait_q);
9412
9413 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9414 return rc;
9415}
9416
9417/**
9418 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 9419 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
 9420 *
Alexander Gordeev60e76b72014-03-12 16:08:50 -05009421 * Description: The return value from pci_enable_msi_range() cannot always be
Wayne Boyer95fecd92009-06-16 15:13:28 -07009422 * trusted. This routine sets up and initiates a test interrupt to determine
 9423 * if the interrupt is received via the ipr_test_intr() service routine.
 9424 * If the test fails, the driver will fall back to LSI.
9425 *
9426 * Return value:
9427 * 0 on success / non-zero on failure
9428 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009429static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -07009430{
9431 int rc;
9432 volatile u32 int_reg;
9433 unsigned long lock_flags = 0;
9434
9435 ENTER;
9436
9437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9438 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9439 ioa_cfg->msi_received = 0;
9440 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -08009441 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009442 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9444
wenxiong@linux.vnet.ibm.comf19799f2013-02-27 12:37:45 -06009445 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9446 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9447 else
9448 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009449 if (rc) {
9450 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9451 return rc;
9452 } else if (ipr_debug)
9453 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9454
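	/* Raise a test interrupt by writing the debug acknowledge bit, then
	 * wait up to a second for ipr_test_intr() to report it. */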
Wayne Boyer214777b2010-02-19 13:24:26 -08009455 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009456 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9457 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009458 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009459 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9460
Wayne Boyer95fecd92009-06-16 15:13:28 -07009461 if (!ioa_cfg->msi_received) {
9462 /* MSI test failed */
9463 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9464 rc = -EOPNOTSUPP;
9465 } else if (ipr_debug)
9466 dev_info(&pdev->dev, "MSI test succeeded.\n");
9467
9468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9469
wenxiong@linux.vnet.ibm.comf19799f2013-02-27 12:37:45 -06009470 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9471 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9472 else
9473 free_irq(pdev->irq, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009474
9475 LEAVE;
9476
9477 return rc;
9478}
9479
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009480/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -07009481 * @pdev: PCI device struct
9482 * @dev_id: PCI device id struct
9483 *
9484 * Return value:
9485 * 0 on success / non-zero on failure
9486 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009487static int ipr_probe_ioa(struct pci_dev *pdev,
9488 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009489{
9490 struct ipr_ioa_cfg *ioa_cfg;
9491 struct Scsi_Host *host;
9492 unsigned long ipr_regs_pci;
9493 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -07009494 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -05009495 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -05009496 unsigned long lock_flags, driver_lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009497
9498 ENTER;
9499
Linus Torvalds1da177e2005-04-16 15:20:36 -07009500 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009501 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9502
9503 if (!host) {
9504 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9505 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -06009506 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009507 }
9508
9509 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9510 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d132012-07-09 21:06:08 -07009511 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009512
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009513 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009514
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009515 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009516 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9517 dev_id->vendor, dev_id->device);
9518 goto out_scsi_host_put;
9519 }
9520
Wayne Boyera32c0552010-02-19 13:23:36 -08009521 /* set SIS 32 or SIS 64 */
9522 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009523 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -05009524 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -05009525 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009526
Brian King5469cb52007-03-29 12:42:40 -05009527 if (ipr_transop_timeout)
9528 ioa_cfg->transop_timeout = ipr_transop_timeout;
9529 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9530 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9531 else
9532 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9533
Auke Kok44c10132007-06-08 15:46:36 -07009534 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -05009535
Brian King6270e592014-01-21 12:16:41 -06009536 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9537
Linus Torvalds1da177e2005-04-16 15:20:36 -07009538 ipr_regs_pci = pci_resource_start(pdev, 0);
9539
9540 rc = pci_request_regions(pdev, IPR_NAME);
9541 if (rc < 0) {
9542 dev_err(&pdev->dev,
9543 "Couldn't register memory range of registers\n");
9544 goto out_scsi_host_put;
9545 }
9546
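	/* Enable the PCI device; if the slot is in EEH error recovery, wait for it to finish and retry once */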
Brian King6270e592014-01-21 12:16:41 -06009547 rc = pci_enable_device(pdev);
9548
9549 if (rc || pci_channel_offline(pdev)) {
9550 if (pci_channel_offline(pdev)) {
9551 ipr_wait_for_pci_err_recovery(ioa_cfg);
9552 rc = pci_enable_device(pdev);
9553 }
9554
9555 if (rc) {
9556 dev_err(&pdev->dev, "Cannot enable adapter\n");
9557 ipr_wait_for_pci_err_recovery(ioa_cfg);
9558 goto out_release_regions;
9559 }
9560 }
9561
Arjan van de Ven25729a72008-09-28 16:18:02 -07009562 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009563
9564 if (!ipr_regs) {
9565 dev_err(&pdev->dev,
9566 "Couldn't map memory range of registers\n");
9567 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -06009568 goto out_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009569 }
9570
9571 ioa_cfg->hdw_dma_regs = ipr_regs;
9572 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9573 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9574
Brian King6270e592014-01-21 12:16:41 -06009575 ipr_init_regs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009576
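	/* SIS-64 chips support 64-bit DMA; try that first and fall back to the 32-bit mask used by SIS-32 chips */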
Wayne Boyera32c0552010-02-19 13:23:36 -08009577 if (ioa_cfg->sis64) {
Anton Blanchard869404c2014-10-30 17:27:09 -05009578 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Wayne Boyera32c0552010-02-19 13:23:36 -08009579 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -05009580 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9581 rc = dma_set_mask_and_coherent(&pdev->dev,
9582 DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -08009583 }
Wayne Boyera32c0552010-02-19 13:23:36 -08009584 } else
Anton Blanchard869404c2014-10-30 17:27:09 -05009585 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -08009586
Linus Torvalds1da177e2005-04-16 15:20:36 -07009587 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -05009588 dev_err(&pdev->dev, "Failed to set DMA mask\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07009589 goto cleanup_nomem;
9590 }
9591
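	/* Program the cache line size specified by the chip configuration into PCI config space */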
9592 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9593 ioa_cfg->chip_cfg->cache_line_size);
9594
9595 if (rc != PCIBIOS_SUCCESSFUL) {
9596 dev_err(&pdev->dev, "Write of cache line size failed\n");
Brian King6270e592014-01-21 12:16:41 -06009597 ipr_wait_for_pci_err_recovery(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009598 rc = -EIO;
9599 goto cleanup_nomem;
9600 }
9601
Brian King6270e592014-01-21 12:16:41 -06009602 /* Issue MMIO read to ensure card is not in EEH */
9603 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9604 ipr_wait_for_pci_err_recovery(ioa_cfg);
9605
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009606 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9607 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9608 IPR_MAX_MSIX_VECTORS);
9609 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9610 }
9611
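	/* Pick the interrupt mode: MSI-X if the chip supports it and enabling succeeds, then MSI, otherwise a single legacy (LSI) interrupt */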
9612 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009613 ipr_enable_msix(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009614 ioa_cfg->intr_flag = IPR_USE_MSIX;
9615 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009616 ipr_enable_msi(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009617 ioa_cfg->intr_flag = IPR_USE_MSI;
9618 else {
9619 ioa_cfg->intr_flag = IPR_USE_LSI;
9620 ioa_cfg->nvectors = 1;
9621 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9622 }
9623
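	/* Enable bus mastering, retrying once if the adapter drops into EEH recovery while doing so */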
Brian King6270e592014-01-21 12:16:41 -06009624 pci_set_master(pdev);
9625
9626 if (pci_channel_offline(pdev)) {
9627 ipr_wait_for_pci_err_recovery(ioa_cfg);
9628 pci_set_master(pdev);
9629 if (pci_channel_offline(pdev)) {
9630 rc = -EIO;
9631 goto out_msi_disable;
9632 }
9633 }
9634
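	/* For MSI/MSI-X, verify a test interrupt is actually delivered; if not, disable it and fall back to legacy interrupts */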
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009635 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9636 ioa_cfg->intr_flag == IPR_USE_MSIX) {
Wayne Boyer95fecd92009-06-16 15:13:28 -07009637 rc = ipr_test_msi(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009638 if (rc == -EOPNOTSUPP) {
Brian King6270e592014-01-21 12:16:41 -06009639 ipr_wait_for_pci_err_recovery(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009640 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9641 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9642 pci_disable_msi(pdev);
9643 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9644 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9645 pci_disable_msix(pdev);
9646 }
9647
9648 ioa_cfg->intr_flag = IPR_USE_LSI;
9649 ioa_cfg->nvectors = 1;
9650 }
Wayne Boyer95fecd92009-06-16 15:13:28 -07009651 else if (rc)
9652 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009653 else {
9654 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9655 dev_info(&pdev->dev,
9656 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9657 ioa_cfg->nvectors, pdev->irq);
9658 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9659 dev_info(&pdev->dev,
9660 "Request for %d MSIXs succeeded.",
9661 ioa_cfg->nvectors);
9662 }
9663 }
9664
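	/* Use one HRRQ per vector, bounded by the number of online CPUs and the driver maximum */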
9665 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9666 (unsigned int)num_online_cpus(),
9667 (unsigned int)IPR_MAX_HRRQ_NUM);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009668
Linus Torvalds1da177e2005-04-16 15:20:36 -07009669 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -07009670 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009671
9672 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -07009673 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009674
9675 rc = ipr_alloc_mem(ioa_cfg);
9676 if (rc < 0) {
9677 dev_err(&pdev->dev,
9678 "Couldn't allocate enough memory for device driver!\n");
Julia Lawallf170c682011-07-11 14:08:25 -07009679 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009680 }
9681
Brian King6270e592014-01-21 12:16:41 -06009682 /* Save away PCI config space for use following IOA reset */
9683 rc = pci_save_state(pdev);
9684
9685 if (rc != PCIBIOS_SUCCESSFUL) {
9686 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9687 rc = -EIO;
9688 goto cleanup_nolog;
9689 }
9690
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009691 /*
9692 * If HRRQ updated interrupt is not masked, or reset alert is set,
9693 * the card is in an unknown state and needs a hard reset
9694 */
Wayne Boyer214777b2010-02-19 13:24:26 -08009695 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9696 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9697 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009698 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9699 ioa_cfg->needs_hard_reset = 1;
Anton Blanchard5d7c20b2011-08-01 19:43:45 +10009700 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
Brian King473b1e82007-05-02 10:44:11 -05009701 ioa_cfg->needs_hard_reset = 1;
9702 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9703 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009704
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009705 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009706 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009708
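	/* Register one handler per MSI/MSI-X vector, or a single shared handler when running with a legacy interrupt */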
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009709 if (ioa_cfg->intr_flag == IPR_USE_MSI
9710 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9711 name_msi_vectors(ioa_cfg);
9712 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9713 0,
9714 ioa_cfg->vectors_info[0].desc,
9715 &ioa_cfg->hrrq[0]);
9716 if (!rc)
9717 rc = ipr_request_other_msi_irqs(ioa_cfg);
9718 } else {
9719 rc = request_irq(pdev->irq, ipr_isr,
9720 IRQF_SHARED,
9721 IPR_NAME, &ioa_cfg->hrrq[0]);
9722 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009723 if (rc) {
9724 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9725 pdev->irq, rc);
9726 goto cleanup_nolog;
9727 }
9728
Brian King463fc692007-05-07 17:09:05 -05009729 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9730 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9731 ioa_cfg->needs_warm_reset = 1;
9732 ioa_cfg->reset = ipr_reset_slot_reset;
9733 } else
9734 ioa_cfg->reset = ipr_reset_start_bist;
9735
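	/* Add the adapter to the global list walked by the reboot notifier */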
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -05009736 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009737 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -05009738 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009739
9740 LEAVE;
9741out:
9742 return rc;
9743
9744cleanup_nolog:
9745 ipr_free_mem(ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009746out_msi_disable:
Brian King6270e592014-01-21 12:16:41 -06009747 ipr_wait_for_pci_err_recovery(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009748 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9749 pci_disable_msi(pdev);
9750 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9751 pci_disable_msix(pdev);
Julia Lawallf170c682011-07-11 14:08:25 -07009752cleanup_nomem:
9753 iounmap(ipr_regs);
Brian King6270e592014-01-21 12:16:41 -06009754out_disable:
9755 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009756out_release_regions:
9757 pci_release_regions(pdev);
9758out_scsi_host_put:
9759 scsi_host_put(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009760 goto out;
9761}
9762
9763/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009764 * ipr_initiate_ioa_bringdown - Bring down an adapter
9765 * @ioa_cfg: ioa config struct
9766 * @shutdown_type: shutdown type
9767 *
9768 * Description: This function will initiate bringing down the adapter.
9769 * This consists of issuing an IOA shutdown to the adapter
9770 * to flush the cache, and running BIST.
9771 * If the caller needs to wait on the completion of the reset,
9772 * the caller must sleep on the reset_wait_q.
9773 *
9774 * Return value:
9775 * none
9776 **/
9777static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9778 enum ipr_shutdown_type shutdown_type)
9779{
9780 ENTER;
9781 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9782 ioa_cfg->sdt_state = ABORT_DUMP;
9783 ioa_cfg->reset_retries = 0;
9784 ioa_cfg->in_ioa_bringdown = 1;
9785 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9786 LEAVE;
9787}
9788
9789/**
9790 * __ipr_remove - Remove a single adapter
9791 * @pdev: pci device struct
9792 *
9793 * Adapter hot plug remove entry point.
9794 *
9795 * Return value:
9796 * none
9797 **/
9798static void __ipr_remove(struct pci_dev *pdev)
9799{
9800 unsigned long host_lock_flags = 0;
9801 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Brian Kingbfae7822013-01-30 23:45:08 -06009802 int i;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -05009803 unsigned long driver_lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009804 ENTER;
9805
9806 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009807 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05009808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9809 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9810 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9811 }
9812
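	/* Mark every HRRQ as going away so no new commands are started while the adapter is brought down */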
Brian Kingbfae7822013-01-30 23:45:08 -06009813 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9814 spin_lock(&ioa_cfg->hrrq[i]._lock);
9815 ioa_cfg->hrrq[i].removing_ioa = 1;
9816 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9817 }
9818 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009819 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9820
9821 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9822 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Tejun Heo43829732012-08-20 14:51:24 -07009823 flush_work(&ioa_cfg->work_q);
wenxiong@linux.vnet.ibm.com9077a942013-03-14 13:52:24 -05009824 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009825 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9826
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -05009827 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009828 list_del(&ioa_cfg->queue);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -05009829 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009830
9831 if (ioa_cfg->sdt_state == ABORT_DUMP)
9832 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9834
9835 ipr_free_all_resources(ioa_cfg);
9836
9837 LEAVE;
9838}
9839
9840/**
9841 * ipr_remove - IOA hot plug remove entry point
9842 * @pdev: pci device struct
9843 *
9844 * Adapter hot plug remove entry point.
9845 *
9846 * Return value:
9847 * none
9848 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009849static void ipr_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009850{
9851 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9852
9853 ENTER;
9854
Tony Jonesee959b02008-02-22 00:13:36 +01009855 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009856 &ipr_trace_attr);
Tony Jonesee959b02008-02-22 00:13:36 +01009857 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009858 &ipr_dump_attr);
9859 scsi_remove_host(ioa_cfg->host);
9860
9861 __ipr_remove(pdev);
9862
9863 LEAVE;
9864}
9865
9866/**
9867 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
9868 *
9869 * Return value:
9870 * 0 on success / non-zero on failure
9871 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009872static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009873{
9874 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06009875 int rc, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009876
9877 rc = ipr_probe_ioa(pdev, dev_id);
9878
9879 if (rc)
9880 return rc;
9881
9882 ioa_cfg = pci_get_drvdata(pdev);
9883 rc = ipr_probe_ioa_part2(ioa_cfg);
9884
9885 if (rc) {
9886 __ipr_remove(pdev);
9887 return rc;
9888 }
9889
9890 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9891
9892 if (rc) {
9893 __ipr_remove(pdev);
9894 return rc;
9895 }
9896
Tony Jonesee959b02008-02-22 00:13:36 +01009897 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009898 &ipr_trace_attr);
9899
9900 if (rc) {
9901 scsi_remove_host(ioa_cfg->host);
9902 __ipr_remove(pdev);
9903 return rc;
9904 }
9905
Tony Jonesee959b02008-02-22 00:13:36 +01009906 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009907 &ipr_dump_attr);
9908
9909 if (rc) {
Tony Jonesee959b02008-02-22 00:13:36 +01009910 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009911 &ipr_trace_attr);
9912 scsi_remove_host(ioa_cfg->host);
9913 __ipr_remove(pdev);
9914 return rc;
9915 }
9916
9917 scsi_scan_host(ioa_cfg->host);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06009918 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9919
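	/* On SIS-64 adapters with multiple vectors, completions on the secondary HRRQs are processed via blk_iopoll in softirq context */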
Jens Axboe89f8b332014-03-13 09:38:42 -06009920 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06009921 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9922 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9923 ioa_cfg->iopoll_weight, ipr_iopoll);
9924 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9925 }
9926 }
9927
Linus Torvalds1da177e2005-04-16 15:20:36 -07009928 schedule_work(&ioa_cfg->work_q);
9929 return 0;
9930}
9931
9932/**
9933 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07009934 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07009935 *
9936 * This function is invoked upon system shutdown/reboot. It will issue
9937 * an adapter shutdown to the adapter to flush the write cache.
9938 *
9939 * Return value:
9940 * none
9941 **/
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07009942static void ipr_shutdown(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009943{
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07009944 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009945 unsigned long lock_flags = 0;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06009946 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009947
9948 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
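	/* Stop iopoll on the secondary HRRQs before initiating the shutdown */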
Jens Axboe89f8b332014-03-13 09:38:42 -06009949 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06009950 ioa_cfg->iopoll_weight = 0;
9951 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9952 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
9953 }
9954
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009955 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05009956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9957 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9958 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9959 }
9960
Linus Torvalds1da177e2005-04-16 15:20:36 -07009961 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9962 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9963 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9964}
9965
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009966static struct pci_device_id ipr_pci_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009967 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009968 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009969 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009970 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009971 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009972 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009973 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009974 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009975 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -06009976 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009977 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -06009978 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009979 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -06009980 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009981 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King5469cb52007-03-29 12:42:40 -05009982 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9983 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009984 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -06009985 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009986 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -05009987 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9988 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -06009989 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -05009990 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9991 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009992 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -06009993 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009994 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -05009995 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9996 IPR_USE_LONG_TRANSOP_TIMEOUT},
Brian King60e74862006-11-21 10:28:10 -06009997 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -05009998 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9999 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010000 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King22d2e402007-04-26 16:00:13 -050010001 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10002 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King185eb312007-03-29 12:42:53 -050010003 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King185eb312007-03-29 12:42:53 -050010004 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10005 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Wayne Boyerb0f56d32010-06-24 13:34:14 -070010006 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10007 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King5469cb52007-03-29 12:42:40 -050010008 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
Brian King463fc692007-05-07 17:09:05 -050010009 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010010 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
Brian King6d84c942007-01-23 11:25:23 -060010011 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010012 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King6d84c942007-01-23 11:25:23 -060010013 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010014 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010015 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10016 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010017 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010018 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10019 IPR_USE_LONG_TRANSOP_TIMEOUT },
Wayne Boyerd7b46272010-02-19 13:24:38 -080010020 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10021 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10022 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10023 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10024 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10025 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
Wayne Boyer32622bd2010-10-18 20:24:34 -070010026 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -060010027 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10028 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer5a918352011-10-27 11:58:21 -070010029 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10030 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer32622bd2010-10-18 20:24:34 -070010031 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010032 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010033 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010034 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010035 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010036 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010037 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010038 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10039 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10040 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010041 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -060010042 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10043 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10044 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10045 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10046 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10047 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10048 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10049 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
wenxiong@linux.vnet.ibm.com43c5fda2013-07-10 10:46:27 -050010050 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10051 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10052 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wendy Xiongf94d9962014-01-21 12:16:40 -060010053 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10054 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
wenxiong@linux.vnet.ibm.com43c5fda2013-07-10 10:46:27 -050010055 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10056 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10057 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10058 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10059 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10060 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10061 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10062 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10063 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10064 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10065 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
Wendy Xiong5eeac3e2014-03-12 16:08:52 -050010066 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10067 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10068 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10069 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10070 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10071 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010072 { }
10073};
10074MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10075
Stephen Hemmingera55b2d22012-09-07 09:33:16 -070010076static const struct pci_error_handlers ipr_err_handler = {
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010077 .error_detected = ipr_pci_error_detected,
Brian King6270e592014-01-21 12:16:41 -060010078 .mmio_enabled = ipr_pci_mmio_enabled,
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010079 .slot_reset = ipr_pci_slot_reset,
10080};
10081
Linus Torvalds1da177e2005-04-16 15:20:36 -070010082static struct pci_driver ipr_driver = {
10083 .name = IPR_NAME,
10084 .id_table = ipr_pci_table,
10085 .probe = ipr_probe,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010086 .remove = ipr_remove,
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010087 .shutdown = ipr_shutdown,
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010088 .err_handler = &ipr_err_handler,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010089};
10090
10091/**
Wayne Boyerf72919e2010-02-19 13:24:21 -080010092 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
10093 *
10094 * Return value:
10095 * none
10096 **/
10097static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10098{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010099 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010100}
10101
10102/**
10103 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)
10104 *
10105 * Return value:
10106 * NOTIFY_OK on success / NOTIFY_DONE on failure
10107 **/
10108static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10109{
10110 struct ipr_cmnd *ipr_cmd;
10111 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010112 unsigned long flags = 0, driver_lock_flags;
Wayne Boyerf72919e2010-02-19 13:24:21 -080010113
10114 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10115 return NOTIFY_DONE;
10116
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010117 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010118
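	/* Send a SHUTDOWN PREPARE FOR NORMAL to every adapter that is still accepting commands */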
10119 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10120 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010121 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Wayne Boyerf72919e2010-02-19 13:24:21 -080010122 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10123 continue;
10124 }
10125
10126 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10127 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10128 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10129 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10130 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10131
10132 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10134 }
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010135 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010136
10137 return NOTIFY_OK;
10138}
10139
10140static struct notifier_block ipr_notifier = {
10141 ipr_halt, NULL, 0
10142};
10143
10144/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010145 * ipr_init - Module entry point
10146 *
10147 * Return value:
10148 * 0 on success / negative value on failure
10149 **/
10150static int __init ipr_init(void)
10151{
10152 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10153 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10154
Wayne Boyerf72919e2010-02-19 13:24:21 -080010155 register_reboot_notifier(&ipr_notifier);
Henrik Kretzschmardcbccbde2006-09-25 16:58:58 -070010156 return pci_register_driver(&ipr_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010157}
10158
10159/**
10160 * ipr_exit - Module unload
10161 *
10162 * Module unload entry point.
10163 *
10164 * Return value:
10165 * none
10166 **/
10167static void __exit ipr_exit(void)
10168{
Wayne Boyerf72919e2010-02-19 13:24:21 -080010169 unregister_reboot_notifier(&ipr_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010170 pci_unregister_driver(&ipr_driver);
10171}
10172
10173module_init(ipr_init);
10174module_exit(ipr_exit);