/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
105static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
Brian King60e74862006-11-21 10:28:10 -0600106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 .mailbox = 0x0042C,
Brian King89aad422012-03-14 21:20:10 -0500108 .max_cmds = 100,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 .cache_line_size = 0x20,
Brian King7dd21302012-03-14 21:20:08 -0500110 .clear_isr = 1,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -0600111 .iopoll_weight = 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112 {
113 .set_interrupt_mask_reg = 0x0022C,
114 .clr_interrupt_mask_reg = 0x00230,
Wayne Boyer214777b2010-02-19 13:24:26 -0800115 .clr_interrupt_mask_reg32 = 0x00230,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116 .sense_interrupt_mask_reg = 0x0022C,
Wayne Boyer214777b2010-02-19 13:24:26 -0800117 .sense_interrupt_mask_reg32 = 0x0022C,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118 .clr_interrupt_reg = 0x00228,
Wayne Boyer214777b2010-02-19 13:24:26 -0800119 .clr_interrupt_reg32 = 0x00228,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120 .sense_interrupt_reg = 0x00224,
Wayne Boyer214777b2010-02-19 13:24:26 -0800121 .sense_interrupt_reg32 = 0x00224,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122 .ioarrin_reg = 0x00404,
123 .sense_uproc_interrupt_reg = 0x00214,
Wayne Boyer214777b2010-02-19 13:24:26 -0800124 .sense_uproc_interrupt_reg32 = 0x00214,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125 .set_uproc_interrupt_reg = 0x00214,
Wayne Boyer214777b2010-02-19 13:24:26 -0800126 .set_uproc_interrupt_reg32 = 0x00214,
127 .clr_uproc_interrupt_reg = 0x00218,
128 .clr_uproc_interrupt_reg32 = 0x00218
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129 }
130 },
131 { /* Snipe and Scamp */
132 .mailbox = 0x0052C,
Brian King89aad422012-03-14 21:20:10 -0500133 .max_cmds = 100,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134 .cache_line_size = 0x20,
Brian King7dd21302012-03-14 21:20:08 -0500135 .clear_isr = 1,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -0600136 .iopoll_weight = 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137 {
138 .set_interrupt_mask_reg = 0x00288,
139 .clr_interrupt_mask_reg = 0x0028C,
Wayne Boyer214777b2010-02-19 13:24:26 -0800140 .clr_interrupt_mask_reg32 = 0x0028C,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141 .sense_interrupt_mask_reg = 0x00288,
Wayne Boyer214777b2010-02-19 13:24:26 -0800142 .sense_interrupt_mask_reg32 = 0x00288,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143 .clr_interrupt_reg = 0x00284,
Wayne Boyer214777b2010-02-19 13:24:26 -0800144 .clr_interrupt_reg32 = 0x00284,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145 .sense_interrupt_reg = 0x00280,
Wayne Boyer214777b2010-02-19 13:24:26 -0800146 .sense_interrupt_reg32 = 0x00280,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147 .ioarrin_reg = 0x00504,
148 .sense_uproc_interrupt_reg = 0x00290,
Wayne Boyer214777b2010-02-19 13:24:26 -0800149 .sense_uproc_interrupt_reg32 = 0x00290,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150 .set_uproc_interrupt_reg = 0x00290,
Wayne Boyer214777b2010-02-19 13:24:26 -0800151 .set_uproc_interrupt_reg32 = 0x00290,
152 .clr_uproc_interrupt_reg = 0x00294,
153 .clr_uproc_interrupt_reg32 = 0x00294
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 }
155 },
Wayne Boyera74c1632010-02-19 13:23:51 -0800156 { /* CRoC */
Wayne Boyer110def82010-11-04 09:36:16 -0700157 .mailbox = 0x00044,
Brian King89aad422012-03-14 21:20:10 -0500158 .max_cmds = 1000,
Wayne Boyera74c1632010-02-19 13:23:51 -0800159 .cache_line_size = 0x20,
Brian King7dd21302012-03-14 21:20:08 -0500160 .clear_isr = 0,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -0600161 .iopoll_weight = 64,
Wayne Boyera74c1632010-02-19 13:23:51 -0800162 {
163 .set_interrupt_mask_reg = 0x00010,
164 .clr_interrupt_mask_reg = 0x00018,
Wayne Boyer214777b2010-02-19 13:24:26 -0800165 .clr_interrupt_mask_reg32 = 0x0001C,
Wayne Boyera74c1632010-02-19 13:23:51 -0800166 .sense_interrupt_mask_reg = 0x00010,
Wayne Boyer214777b2010-02-19 13:24:26 -0800167 .sense_interrupt_mask_reg32 = 0x00014,
Wayne Boyera74c1632010-02-19 13:23:51 -0800168 .clr_interrupt_reg = 0x00008,
Wayne Boyer214777b2010-02-19 13:24:26 -0800169 .clr_interrupt_reg32 = 0x0000C,
Wayne Boyera74c1632010-02-19 13:23:51 -0800170 .sense_interrupt_reg = 0x00000,
Wayne Boyer214777b2010-02-19 13:24:26 -0800171 .sense_interrupt_reg32 = 0x00004,
Wayne Boyera74c1632010-02-19 13:23:51 -0800172 .ioarrin_reg = 0x00070,
173 .sense_uproc_interrupt_reg = 0x00020,
Wayne Boyer214777b2010-02-19 13:24:26 -0800174 .sense_uproc_interrupt_reg32 = 0x00024,
Wayne Boyera74c1632010-02-19 13:23:51 -0800175 .set_uproc_interrupt_reg = 0x00020,
Wayne Boyer214777b2010-02-19 13:24:26 -0800176 .set_uproc_interrupt_reg32 = 0x00024,
Wayne Boyerdcbad002010-02-19 13:24:14 -0800177 .clr_uproc_interrupt_reg = 0x00028,
Wayne Boyer214777b2010-02-19 13:24:26 -0800178 .clr_uproc_interrupt_reg32 = 0x0002C,
179 .init_feedback_reg = 0x0005C,
Wayne Boyerdcbad002010-02-19 13:24:14 -0800180 .dump_addr_reg = 0x00064,
Wayne Boyer8701f182010-06-04 10:26:50 -0700181 .dump_data_reg = 0x00068,
182 .endian_swap_reg = 0x00084
Wayne Boyera74c1632010-02-19 13:23:51 -0800183 }
184 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185};
186
187static const struct ipr_chip_t ipr_chip[] = {
Wayne Boyercb237ef2010-06-17 11:51:40 -0700188 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
Wayne Boyercd9b3d02012-02-23 11:54:55 -0800196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197};
198
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -0300199static int ipr_max_bus_speeds[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
201};
202
203MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205module_param_named(max_speed, ipr_max_speed, uint, 0);
206MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207module_param_named(log_level, ipr_log_level, uint, 0);
208MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209module_param_named(testmode, ipr_testmode, int, 0);
210MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
Wayne Boyer2cf22be2009-02-24 11:36:00 -0800211module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
Wayne Boyer2cf22be2009-02-24 11:36:00 -0800215module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
brking@us.ibm.comd3c74872005-11-01 17:01:34 -0600216MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
Brian Kingac09c342007-04-26 16:00:16 -0500217module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800219module_param_named(max_devs, ipr_max_devs, int, 0);
220MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600222module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224MODULE_LICENSE("GPL");
225MODULE_VERSION(IPR_DRIVER_VERSION);
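
/*
 * Usage example (illustrative only, not part of the original source):
 * the parameters above are normally supplied at module load time, e.g.
 *
 *	modprobe ipr max_speed=1 number_of_msix=2 log_level=2
 *
 * or, when the driver is built into the kernel, on the kernel command
 * line as ipr.max_speed=1, ipr.number_of_msix=2, and so on. The values
 * shown are arbitrary examples, not recommendations.
 */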

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};
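
/*
 * SES enclosures known to the driver and the maximum SCSI bus speed
 * (in MB/s) to use when one is attached. In the compare string, a byte
 * of 'X' is compared against the enclosure's inquiry product ID and any
 * other character (e.g. '*') acts as a wildcard; the lookup helpers that
 * consume this table appear later in the driver. (Descriptive comment,
 * based on how the table is used rather than on original documentation.)
 */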
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
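
/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * Distributes commands across the host RRQs in round-robin fashion.
 * When more than one queue is configured, queue 0 (IPR_INIT_HRRQ) is
 * skipped and left for driver-internal commands. (Descriptive comment
 * added; behavior inferred from the function body below.)
 *
 * Return value:
 * 	index of the HRR queue to use
 **/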
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001004static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1005{
1006 if (ioa_cfg->hrrq_num == 1)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001007 return 0;
1008 else
1009 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001010}
1011
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012/**
1013 * ipr_send_hcam - Send an HCAM to the adapter.
1014 * @ioa_cfg: ioa config struct
1015 * @type: HCAM type
1016 * @hostrcb: hostrcb struct
1017 *
1018 * This function will send a Host Controlled Async command to the adapter.
1019 * If HCAMs are currently not allowed to be issued to the adapter, it will
1020 * place the hostrcb on the free queue.
1021 *
1022 * Return value:
1023 * none
1024 **/
1025static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1026 struct ipr_hostrcb *hostrcb)
1027{
1028 struct ipr_cmnd *ipr_cmd;
1029 struct ipr_ioarcb *ioarcb;
1030
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001031 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001033 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1035
1036 ipr_cmd->u.hostrcb = hostrcb;
1037 ioarcb = &ipr_cmd->ioarcb;
1038
1039 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1040 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1041 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1042 ioarcb->cmd_pkt.cdb[1] = type;
1043 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1044 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1045
Wayne Boyera32c0552010-02-19 13:23:36 -08001046 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1047 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048
1049 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1050 ipr_cmd->done = ipr_process_ccn;
1051 else
1052 ipr_cmd->done = ipr_process_error;
1053
1054 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1055
Wayne Boyera32c0552010-02-19 13:23:36 -08001056 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057 } else {
1058 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1059 }
1060}
1061
1062/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001063 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001064 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001065 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001066 *
1067 * Return value:
1068 * none
1069 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001070static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001072 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001073 case IPR_PROTO_SATA:
1074 case IPR_PROTO_SAS_STP:
1075 res->ata_class = ATA_DEV_ATA;
1076 break;
1077 case IPR_PROTO_SATA_ATAPI:
1078 case IPR_PROTO_SAS_STP_ATAPI:
1079 res->ata_class = ATA_DEV_ATAPI;
1080 break;
1081 default:
1082 res->ata_class = ATA_DEV_UNKNOWN;
1083 break;
1084 }
1085}
1086
1087/**
1088 * ipr_init_res_entry - Initialize a resource entry struct.
1089 * @res: resource entry struct
1090 * @cfgtew: config table entry wrapper struct
1091 *
1092 * Return value:
1093 * none
1094 **/
1095static void ipr_init_res_entry(struct ipr_resource_entry *res,
1096 struct ipr_config_table_entry_wrapper *cfgtew)
1097{
1098 int found = 0;
1099 unsigned int proto;
1100 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1101 struct ipr_resource_entry *gscsi_res = NULL;
1102
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001103 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104 res->in_erp = 0;
1105 res->add_to_ml = 0;
1106 res->del_from_ml = 0;
1107 res->resetting_device = 0;
1108 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001109 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001110
1111 if (ioa_cfg->sis64) {
1112 proto = cfgtew->u.cfgte64->proto;
1113 res->res_flags = cfgtew->u.cfgte64->res_flags;
1114 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001115 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001116
1117 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1118 sizeof(res->res_path));
1119
1120 res->bus = 0;
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001121 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1122 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001123 res->lun = scsilun_to_int(&res->dev_lun);
1124
1125 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1126 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1127 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1128 found = 1;
1129 res->target = gscsi_res->target;
1130 break;
1131 }
1132 }
1133 if (!found) {
1134 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1135 ioa_cfg->max_devs_supported);
1136 set_bit(res->target, ioa_cfg->target_ids);
1137 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001138 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1139 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1140 res->target = 0;
1141 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1142 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1143 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1144 ioa_cfg->max_devs_supported);
1145 set_bit(res->target, ioa_cfg->array_ids);
1146 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1147 res->bus = IPR_VSET_VIRTUAL_BUS;
1148 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1149 ioa_cfg->max_devs_supported);
1150 set_bit(res->target, ioa_cfg->vset_ids);
1151 } else {
1152 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1153 ioa_cfg->max_devs_supported);
1154 set_bit(res->target, ioa_cfg->target_ids);
1155 }
1156 } else {
1157 proto = cfgtew->u.cfgte->proto;
1158 res->qmodel = IPR_QUEUEING_MODEL(res);
1159 res->flags = cfgtew->u.cfgte->flags;
1160 if (res->flags & IPR_IS_IOA_RESOURCE)
1161 res->type = IPR_RES_TYPE_IOAFP;
1162 else
1163 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1164
1165 res->bus = cfgtew->u.cfgte->res_addr.bus;
1166 res->target = cfgtew->u.cfgte->res_addr.target;
1167 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001168 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001169 }
1170
1171 ipr_update_ata_class(res, proto);
1172}
1173
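/*
 * On SIS-64 adapters ipr_init_res_entry() hands out target numbers from
 * per-bus bitmaps: array_ids for the array virtual bus, vset_ids for the
 * VSET virtual bus, and target_ids for everything else (generic SCSI
 * resources that share a dev_id reuse the same target).  The matching
 * bits are released again in ipr_clear_res_target() when the resource
 * goes away.
 */
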
1174/**
1175 * ipr_is_same_device - Determine if two devices are the same.
1176 * @res: resource entry struct
1177 * @cfgtew: config table entry wrapper struct
1178 *
1179 * Return value:
1180 * 1 if the devices are the same / 0 otherwise
1181 **/
1182static int ipr_is_same_device(struct ipr_resource_entry *res,
1183 struct ipr_config_table_entry_wrapper *cfgtew)
1184{
1185 if (res->ioa_cfg->sis64) {
1186 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1187 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001188 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001189 sizeof(cfgtew->u.cfgte64->lun))) {
1190 return 1;
1191 }
1192 } else {
1193 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1194 res->target == cfgtew->u.cfgte->res_addr.target &&
1195 res->lun == cfgtew->u.cfgte->res_addr.lun)
1196 return 1;
1197 }
1198
1199 return 0;
1200}
1201
1202/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001203 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001204 * @res_path: resource path
1205 * @buffer: buffer to hold the formatted resource path
Brian Kingb3b3b402013-01-11 17:43:49 -06001206 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001207 *
1208 * Return value:
1209 * pointer to buffer
1210 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001211static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001212{
1213 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001214 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001215
Wayne Boyer46d74562010-08-11 07:15:17 -07001216 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001217 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1218 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1219 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001220
1221 return buffer;
1222}
1223
1224/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001225 * ipr_format_res_path - Format the resource path for printing.
1226 * @ioa_cfg: ioa config struct
1227 * @res_path: resource path
1228 * @buffer: buffer to hold the formatted resource path
1229 * @len: length of buffer provided
1230 *
1231 * Return value:
1232 * pointer to buffer
1233 **/
1234static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1235 u8 *res_path, char *buffer, int len)
1236{
1237 char *p = buffer;
1238
1239 *p = '\0';
1240 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1241 __ipr_format_res_path(res_path, p, len - (p - buffer));
1242 return buffer;
1243}
1244
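/*
 * An illustrative example of the formatting above: for host number 2 and
 * a resource path whose bytes are 0x00, 0x01 followed by the 0xff
 * terminator, __ipr_format_res_path() produces "00-01" and
 * ipr_format_res_path() prefixes the host number, giving "2/00-01".
 */
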
1245/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001246 * ipr_update_res_entry - Update the resource entry.
1247 * @res: resource entry struct
1248 * @cfgtew: config table entry wrapper struct
1249 *
1250 * Return value:
1251 * none
1252 **/
1253static void ipr_update_res_entry(struct ipr_resource_entry *res,
1254 struct ipr_config_table_entry_wrapper *cfgtew)
1255{
1256 char buffer[IPR_MAX_RES_PATH_LENGTH];
1257 unsigned int proto;
1258 int new_path = 0;
1259
1260 if (res->ioa_cfg->sis64) {
1261 res->flags = cfgtew->u.cfgte64->flags;
1262 res->res_flags = cfgtew->u.cfgte64->res_flags;
Wayne Boyer75576bb2010-07-14 10:50:14 -07001263 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001264
1265 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1266 sizeof(struct ipr_std_inq_data));
1267
1268 res->qmodel = IPR_QUEUEING_MODEL64(res);
1269 proto = cfgtew->u.cfgte64->proto;
1270 res->res_handle = cfgtew->u.cfgte64->res_handle;
1271 res->dev_id = cfgtew->u.cfgte64->dev_id;
1272
1273 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1274 sizeof(res->dev_lun.scsi_lun));
1275
1276 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1277 sizeof(res->res_path))) {
1278 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1279 sizeof(res->res_path));
1280 new_path = 1;
1281 }
1282
1283 if (res->sdev && new_path)
1284 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001285 ipr_format_res_path(res->ioa_cfg,
1286 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001287 } else {
1288 res->flags = cfgtew->u.cfgte->flags;
1289 if (res->flags & IPR_IS_IOA_RESOURCE)
1290 res->type = IPR_RES_TYPE_IOAFP;
1291 else
1292 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1293
1294 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1295 sizeof(struct ipr_std_inq_data));
1296
1297 res->qmodel = IPR_QUEUEING_MODEL(res);
1298 proto = cfgtew->u.cfgte->proto;
1299 res->res_handle = cfgtew->u.cfgte->res_handle;
1300 }
1301
1302 ipr_update_ata_class(res, proto);
1303}
1304
1305/**
1306 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1307 * for the resource.
1308 * @res: resource entry struct
1310 *
1311 * Return value:
1312 * none
1313 **/
1314static void ipr_clear_res_target(struct ipr_resource_entry *res)
1315{
1316 struct ipr_resource_entry *gscsi_res = NULL;
1317 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1318
1319 if (!ioa_cfg->sis64)
1320 return;
1321
1322 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1323 clear_bit(res->target, ioa_cfg->array_ids);
1324 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1325 clear_bit(res->target, ioa_cfg->vset_ids);
1326 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1327 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1328 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1329 return;
1330 clear_bit(res->target, ioa_cfg->target_ids);
1331
1332 } else if (res->bus == 0)
1333 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334}
1335
1336/**
1337 * ipr_handle_config_change - Handle a config change from the adapter
1338 * @ioa_cfg: ioa config struct
1339 * @hostrcb: hostrcb
1340 *
1341 * Return value:
1342 * none
1343 **/
1344static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001345 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346{
1347 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001348 struct ipr_config_table_entry_wrapper cfgtew;
1349 __be32 cc_res_handle;
1350
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 u32 is_ndn = 1;
1352
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001353 if (ioa_cfg->sis64) {
1354 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1355 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1356 } else {
1357 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1358 cc_res_handle = cfgtew.u.cfgte->res_handle;
1359 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
1361 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001362 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 is_ndn = 0;
1364 break;
1365 }
1366 }
1367
1368 if (is_ndn) {
1369 if (list_empty(&ioa_cfg->free_res_q)) {
1370 ipr_send_hcam(ioa_cfg,
1371 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1372 hostrcb);
1373 return;
1374 }
1375
1376 res = list_entry(ioa_cfg->free_res_q.next,
1377 struct ipr_resource_entry, queue);
1378
1379 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001380 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1382 }
1383
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001384 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385
1386 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1387 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001389 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 if (ioa_cfg->allow_ml_add_del)
1391 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001392 } else {
1393 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001395 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001396 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 res->add_to_ml = 1;
1398 if (ioa_cfg->allow_ml_add_del)
1399 schedule_work(&ioa_cfg->work_q);
1400 }
1401
1402 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1403}
1404
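/*
 * Summary of the config change handling above: the resource handle from
 * the hostrcb is looked up in used_res_q, and a previously unknown
 * handle is populated from free_res_q via ipr_init_res_entry().  On a
 * removal notification the entry is either flagged del_from_ml for the
 * worker thread (if a scsi_device is attached) or returned to
 * free_res_q; otherwise an entry without an attached sdev is flagged
 * add_to_ml.  In every case the hostrcb is handed back to the adapter
 * with ipr_send_hcam().
 */
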
1405/**
1406 * ipr_process_ccn - Op done function for a CCN.
1407 * @ipr_cmd: ipr command struct
1408 *
1409 * This function is the op done function for a configuration
1410 * change notification host controlled async from the adapter.
1411 *
1412 * Return value:
1413 * none
1414 **/
1415static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1416{
1417 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1418 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001419 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420
1421 list_del(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001422 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
1424 if (ioasc) {
1425 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1426 dev_err(&ioa_cfg->pdev->dev,
1427 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1428
1429 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1430 } else {
1431 ipr_handle_config_change(ioa_cfg, hostrcb);
1432 }
1433}
1434
1435/**
Brian King8cf093e2007-04-26 16:00:14 -05001436 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1437 * @i: index into buffer
1438 * @buf: string to modify
1439 *
1440 * This function will strip all trailing spaces, pad the end
1441 * of the string with a single space, and NUL-terminate the string.
1442 *
1443 * Return value:
1444 * new length of string
1445 **/
1446static int strip_and_pad_whitespace(int i, char *buf)
1447{
1448 while (i && buf[i] == ' ')
1449 i--;
1450 buf[i+1] = ' ';
1451 buf[i+2] = '\0';
1452 return i + 2;
1453}
1454
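/*
 * Example of strip_and_pad_whitespace(): with buf holding "IBM     "
 * (vendor ID padded with trailing spaces) and i = IPR_VENDOR_ID_LEN - 1,
 * the trailing spaces are trimmed, a single space and a terminating NUL
 * are written, leaving "IBM " and a return value of 4, the offset at
 * which the caller appends the product ID.
 */
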
1455/**
1456 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1457 * @prefix: string to print at start of printk
1458 * @hostrcb: hostrcb pointer
1459 * @vpd: vendor/product id/sn struct
1460 *
1461 * Return value:
1462 * none
1463 **/
1464static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1465 struct ipr_vpd *vpd)
1466{
1467 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1468 int i = 0;
1469
1470 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1471 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1472
1473 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1474 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1475
1476 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1477 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1478
1479 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1480}
1481
1482/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001484 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 *
1486 * Return value:
1487 * none
1488 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001489static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490{
1491 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1492 + IPR_SERIAL_NUM_LEN];
1493
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001494 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1495 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 IPR_PROD_ID_LEN);
1497 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1498 ipr_err("Vendor/Product ID: %s\n", buffer);
1499
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001500 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1502 ipr_err(" Serial Number: %s\n", buffer);
1503}
1504
1505/**
Brian King8cf093e2007-04-26 16:00:14 -05001506 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1507 * @prefix: string to print at start of printk
1508 * @hostrcb: hostrcb pointer
1509 * @vpd: vendor/product id/sn/wwn struct
1510 *
1511 * Return value:
1512 * none
1513 **/
1514static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1515 struct ipr_ext_vpd *vpd)
1516{
1517 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1518 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1519 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1520}
1521
1522/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001523 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1524 * @vpd: vendor/product id/sn/wwn struct
1525 *
1526 * Return value:
1527 * none
1528 **/
1529static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1530{
1531 ipr_log_vpd(&vpd->vpd);
1532 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1533 be32_to_cpu(vpd->wwid[1]));
1534}
1535
1536/**
1537 * ipr_log_enhanced_cache_error - Log a cache error.
1538 * @ioa_cfg: ioa config struct
1539 * @hostrcb: hostrcb struct
1540 *
1541 * Return value:
1542 * none
1543 **/
1544static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1545 struct ipr_hostrcb *hostrcb)
1546{
Wayne Boyer4565e372010-02-19 13:24:07 -08001547 struct ipr_hostrcb_type_12_error *error;
1548
1549 if (ioa_cfg->sis64)
1550 error = &hostrcb->hcam.u.error64.u.type_12_error;
1551 else
1552 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001553
1554 ipr_err("-----Current Configuration-----\n");
1555 ipr_err("Cache Directory Card Information:\n");
1556 ipr_log_ext_vpd(&error->ioa_vpd);
1557 ipr_err("Adapter Card Information:\n");
1558 ipr_log_ext_vpd(&error->cfc_vpd);
1559
1560 ipr_err("-----Expected Configuration-----\n");
1561 ipr_err("Cache Directory Card Information:\n");
1562 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1563 ipr_err("Adapter Card Information:\n");
1564 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1565
1566 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1567 be32_to_cpu(error->ioa_data[0]),
1568 be32_to_cpu(error->ioa_data[1]),
1569 be32_to_cpu(error->ioa_data[2]));
1570}
1571
1572/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 * ipr_log_cache_error - Log a cache error.
1574 * @ioa_cfg: ioa config struct
1575 * @hostrcb: hostrcb struct
1576 *
1577 * Return value:
1578 * none
1579 **/
1580static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1581 struct ipr_hostrcb *hostrcb)
1582{
1583 struct ipr_hostrcb_type_02_error *error =
1584 &hostrcb->hcam.u.error.u.type_02_error;
1585
1586 ipr_err("-----Current Configuration-----\n");
1587 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001588 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001590 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591
1592 ipr_err("-----Expected Configuration-----\n");
1593 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001594 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001596 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597
1598 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1599 be32_to_cpu(error->ioa_data[0]),
1600 be32_to_cpu(error->ioa_data[1]),
1601 be32_to_cpu(error->ioa_data[2]));
1602}
1603
1604/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001605 * ipr_log_enhanced_config_error - Log a configuration error.
1606 * @ioa_cfg: ioa config struct
1607 * @hostrcb: hostrcb struct
1608 *
1609 * Return value:
1610 * none
1611 **/
1612static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1613 struct ipr_hostrcb *hostrcb)
1614{
1615 int errors_logged, i;
1616 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1617 struct ipr_hostrcb_type_13_error *error;
1618
1619 error = &hostrcb->hcam.u.error.u.type_13_error;
1620 errors_logged = be32_to_cpu(error->errors_logged);
1621
1622 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1623 be32_to_cpu(error->errors_detected), errors_logged);
1624
1625 dev_entry = error->dev;
1626
1627 for (i = 0; i < errors_logged; i++, dev_entry++) {
1628 ipr_err_separator;
1629
1630 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1631 ipr_log_ext_vpd(&dev_entry->vpd);
1632
1633 ipr_err("-----New Device Information-----\n");
1634 ipr_log_ext_vpd(&dev_entry->new_vpd);
1635
1636 ipr_err("Cache Directory Card Information:\n");
1637 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1638
1639 ipr_err("Adapter Card Information:\n");
1640 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1641 }
1642}
1643
1644/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001645 * ipr_log_sis64_config_error - Log a device error.
1646 * @ioa_cfg: ioa config struct
1647 * @hostrcb: hostrcb struct
1648 *
1649 * Return value:
1650 * none
1651 **/
1652static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1653 struct ipr_hostrcb *hostrcb)
1654{
1655 int errors_logged, i;
1656 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1657 struct ipr_hostrcb_type_23_error *error;
1658 char buffer[IPR_MAX_RES_PATH_LENGTH];
1659
1660 error = &hostrcb->hcam.u.error64.u.type_23_error;
1661 errors_logged = be32_to_cpu(error->errors_logged);
1662
1663 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1664 be32_to_cpu(error->errors_detected), errors_logged);
1665
1666 dev_entry = error->dev;
1667
1668 for (i = 0; i < errors_logged; i++, dev_entry++) {
1669 ipr_err_separator;
1670
1671 ipr_err("Device %d : %s", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001672 __ipr_format_res_path(dev_entry->res_path,
1673 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001674 ipr_log_ext_vpd(&dev_entry->vpd);
1675
1676 ipr_err("-----New Device Information-----\n");
1677 ipr_log_ext_vpd(&dev_entry->new_vpd);
1678
1679 ipr_err("Cache Directory Card Information:\n");
1680 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1681
1682 ipr_err("Adapter Card Information:\n");
1683 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1684 }
1685}
1686
1687/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 * ipr_log_config_error - Log a configuration error.
1689 * @ioa_cfg: ioa config struct
1690 * @hostrcb: hostrcb struct
1691 *
1692 * Return value:
1693 * none
1694 **/
1695static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1696 struct ipr_hostrcb *hostrcb)
1697{
1698 int errors_logged, i;
1699 struct ipr_hostrcb_device_data_entry *dev_entry;
1700 struct ipr_hostrcb_type_03_error *error;
1701
1702 error = &hostrcb->hcam.u.error.u.type_03_error;
1703 errors_logged = be32_to_cpu(error->errors_logged);
1704
1705 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1706 be32_to_cpu(error->errors_detected), errors_logged);
1707
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001708 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
1710 for (i = 0; i < errors_logged; i++, dev_entry++) {
1711 ipr_err_separator;
1712
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001713 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001714 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
1716 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001717 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001720 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
1722 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001723 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724
1725 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1726 be32_to_cpu(dev_entry->ioa_data[0]),
1727 be32_to_cpu(dev_entry->ioa_data[1]),
1728 be32_to_cpu(dev_entry->ioa_data[2]),
1729 be32_to_cpu(dev_entry->ioa_data[3]),
1730 be32_to_cpu(dev_entry->ioa_data[4]));
1731 }
1732}
1733
1734/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001735 * ipr_log_enhanced_array_error - Log an array configuration error.
1736 * @ioa_cfg: ioa config struct
1737 * @hostrcb: hostrcb struct
1738 *
1739 * Return value:
1740 * none
1741 **/
1742static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1743 struct ipr_hostrcb *hostrcb)
1744{
1745 int i, num_entries;
1746 struct ipr_hostrcb_type_14_error *error;
1747 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1748 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1749
1750 error = &hostrcb->hcam.u.error.u.type_14_error;
1751
1752 ipr_err_separator;
1753
1754 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1755 error->protection_level,
1756 ioa_cfg->host->host_no,
1757 error->last_func_vset_res_addr.bus,
1758 error->last_func_vset_res_addr.target,
1759 error->last_func_vset_res_addr.lun);
1760
1761 ipr_err_separator;
1762
1763 array_entry = error->array_member;
1764 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001765 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001766
1767 for (i = 0; i < num_entries; i++, array_entry++) {
1768 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1769 continue;
1770
1771 if (be32_to_cpu(error->exposed_mode_adn) == i)
1772 ipr_err("Exposed Array Member %d:\n", i);
1773 else
1774 ipr_err("Array Member %d:\n", i);
1775
1776 ipr_log_ext_vpd(&array_entry->vpd);
1777 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1778 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1779 "Expected Location");
1780
1781 ipr_err_separator;
1782 }
1783}
1784
1785/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 * ipr_log_array_error - Log an array configuration error.
1787 * @ioa_cfg: ioa config struct
1788 * @hostrcb: hostrcb struct
1789 *
1790 * Return value:
1791 * none
1792 **/
1793static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1794 struct ipr_hostrcb *hostrcb)
1795{
1796 int i;
1797 struct ipr_hostrcb_type_04_error *error;
1798 struct ipr_hostrcb_array_data_entry *array_entry;
1799 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1800
1801 error = &hostrcb->hcam.u.error.u.type_04_error;
1802
1803 ipr_err_separator;
1804
1805 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1806 error->protection_level,
1807 ioa_cfg->host->host_no,
1808 error->last_func_vset_res_addr.bus,
1809 error->last_func_vset_res_addr.target,
1810 error->last_func_vset_res_addr.lun);
1811
1812 ipr_err_separator;
1813
1814 array_entry = error->array_member;
1815
1816 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001817 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 continue;
1819
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001820 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001822 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001825 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001827 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1828 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1829 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830
1831 ipr_err_separator;
1832
1833 if (i == 9)
1834 array_entry = error->array_member2;
1835 else
1836 array_entry++;
1837 }
1838}
1839
1840/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001841 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001842 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001843 * @data: IOA error data
1844 * @len: data length
1845 *
1846 * Return value:
1847 * none
1848 **/
Brian Kingac719ab2006-11-21 10:28:42 -06001849static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001850{
1851 int i;
1852
1853 if (len == 0)
1854 return;
1855
Brian Kingac719ab2006-11-21 10:28:42 -06001856 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1857 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1858
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001859 for (i = 0; i < len / 4; i += 4) {
1860 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1861 be32_to_cpu(data[i]),
1862 be32_to_cpu(data[i+1]),
1863 be32_to_cpu(data[i+2]),
1864 be32_to_cpu(data[i+3]));
1865 }
1866}
1867
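/*
 * ipr_log_hex_data() prints 16 bytes per line as a byte offset followed
 * by four big-endian words, e.g. "00000010: xxxxxxxx xxxxxxxx xxxxxxxx
 * xxxxxxxx".  When the adapter's log level is at or below
 * IPR_DEFAULT_LOG_LEVEL the dump is capped at IPR_DEFAULT_MAX_ERROR_DUMP
 * bytes.
 */
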
1868/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001869 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1870 * @ioa_cfg: ioa config struct
1871 * @hostrcb: hostrcb struct
1872 *
1873 * Return value:
1874 * none
1875 **/
1876static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1877 struct ipr_hostrcb *hostrcb)
1878{
1879 struct ipr_hostrcb_type_17_error *error;
1880
Wayne Boyer4565e372010-02-19 13:24:07 -08001881 if (ioa_cfg->sis64)
1882 error = &hostrcb->hcam.u.error64.u.type_17_error;
1883 else
1884 error = &hostrcb->hcam.u.error.u.type_17_error;
1885
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001886 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001887 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001888
Brian King8cf093e2007-04-26 16:00:14 -05001889 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1890 be32_to_cpu(hostrcb->hcam.u.error.prc));
1891 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001892 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001893 be32_to_cpu(hostrcb->hcam.length) -
1894 (offsetof(struct ipr_hostrcb_error, u) +
1895 offsetof(struct ipr_hostrcb_type_17_error, data)));
1896}
1897
1898/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001899 * ipr_log_dual_ioa_error - Log a dual adapter error.
1900 * @ioa_cfg: ioa config struct
1901 * @hostrcb: hostrcb struct
1902 *
1903 * Return value:
1904 * none
1905 **/
1906static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1907 struct ipr_hostrcb *hostrcb)
1908{
1909 struct ipr_hostrcb_type_07_error *error;
1910
1911 error = &hostrcb->hcam.u.error.u.type_07_error;
1912 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001913 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001914
Brian King8cf093e2007-04-26 16:00:14 -05001915 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1916 be32_to_cpu(hostrcb->hcam.u.error.prc));
1917 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001918 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001919 be32_to_cpu(hostrcb->hcam.length) -
1920 (offsetof(struct ipr_hostrcb_error, u) +
1921 offsetof(struct ipr_hostrcb_type_07_error, data)));
1922}
1923
Brian King49dc6a12006-11-21 10:28:35 -06001924static const struct {
1925 u8 active;
1926 char *desc;
1927} path_active_desc[] = {
1928 { IPR_PATH_NO_INFO, "Path" },
1929 { IPR_PATH_ACTIVE, "Active path" },
1930 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1931};
1932
1933static const struct {
1934 u8 state;
1935 char *desc;
1936} path_state_desc[] = {
1937 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1938 { IPR_PATH_HEALTHY, "is healthy" },
1939 { IPR_PATH_DEGRADED, "is degraded" },
1940 { IPR_PATH_FAILED, "is failed" }
1941};
1942
1943/**
1944 * ipr_log_fabric_path - Log a fabric path error
1945 * @hostrcb: hostrcb struct
1946 * @fabric: fabric descriptor
1947 *
1948 * Return value:
1949 * none
1950 **/
1951static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1952 struct ipr_hostrcb_fabric_desc *fabric)
1953{
1954 int i, j;
1955 u8 path_state = fabric->path_state;
1956 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1957 u8 state = path_state & IPR_PATH_STATE_MASK;
1958
1959 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1960 if (path_active_desc[i].active != active)
1961 continue;
1962
1963 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1964 if (path_state_desc[j].state != state)
1965 continue;
1966
1967 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1968 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1969 path_active_desc[i].desc, path_state_desc[j].desc,
1970 fabric->ioa_port);
1971 } else if (fabric->cascaded_expander == 0xff) {
1972 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1973 path_active_desc[i].desc, path_state_desc[j].desc,
1974 fabric->ioa_port, fabric->phy);
1975 } else if (fabric->phy == 0xff) {
1976 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1977 path_active_desc[i].desc, path_state_desc[j].desc,
1978 fabric->ioa_port, fabric->cascaded_expander);
1979 } else {
1980 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1981 path_active_desc[i].desc, path_state_desc[j].desc,
1982 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1983 }
1984 return;
1985 }
1986 }
1987
1988 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1989 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1990}
1991
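/*
 * A typical line emitted by ipr_log_fabric_path() looks like
 * "Active path is healthy: IOA Port=0, Phy=1"; the cascade and phy
 * fields are only printed when they are not the 0xff "not present"
 * value.
 */
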
Wayne Boyer4565e372010-02-19 13:24:07 -08001992/**
1993 * ipr_log64_fabric_path - Log a fabric path error
1994 * @hostrcb: hostrcb struct
1995 * @fabric: fabric descriptor
1996 *
1997 * Return value:
1998 * none
1999 **/
2000static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2001 struct ipr_hostrcb64_fabric_desc *fabric)
2002{
2003 int i, j;
2004 u8 path_state = fabric->path_state;
2005 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2006 u8 state = path_state & IPR_PATH_STATE_MASK;
2007 char buffer[IPR_MAX_RES_PATH_LENGTH];
2008
2009 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2010 if (path_active_desc[i].active != active)
2011 continue;
2012
2013 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2014 if (path_state_desc[j].state != state)
2015 continue;
2016
2017 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2018 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002019 ipr_format_res_path(hostrcb->ioa_cfg,
2020 fabric->res_path,
2021 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002022 return;
2023 }
2024 }
2025
2026 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002027 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2028 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002029}
2030
Brian King49dc6a12006-11-21 10:28:35 -06002031static const struct {
2032 u8 type;
2033 char *desc;
2034} path_type_desc[] = {
2035 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2036 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2037 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2038 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2039};
2040
2041static const struct {
2042 u8 status;
2043 char *desc;
2044} path_status_desc[] = {
2045 { IPR_PATH_CFG_NO_PROB, "Functional" },
2046 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2047 { IPR_PATH_CFG_FAILED, "Failed" },
2048 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2049 { IPR_PATH_NOT_DETECTED, "Missing" },
2050 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2051};
2052
2053static const char *link_rate[] = {
2054 "unknown",
2055 "disabled",
2056 "phy reset problem",
2057 "spinup hold",
2058 "port selector",
2059 "unknown",
2060 "unknown",
2061 "unknown",
2062 "1.5Gbps",
2063 "3.0Gbps",
2064 "unknown",
2065 "unknown",
2066 "unknown",
2067 "unknown",
2068 "unknown",
2069 "unknown"
2070};
2071
2072/**
2073 * ipr_log_path_elem - Log a fabric path element.
2074 * @hostrcb: hostrcb struct
2075 * @cfg: fabric path element struct
2076 *
2077 * Return value:
2078 * none
2079 **/
2080static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2081 struct ipr_hostrcb_config_element *cfg)
2082{
2083 int i, j;
2084 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2085 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2086
2087 if (type == IPR_PATH_CFG_NOT_EXIST)
2088 return;
2089
2090 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2091 if (path_type_desc[i].type != type)
2092 continue;
2093
2094 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2095 if (path_status_desc[j].status != status)
2096 continue;
2097
2098 if (type == IPR_PATH_CFG_IOA_PORT) {
2099 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2100 path_status_desc[j].desc, path_type_desc[i].desc,
2101 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2102 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2103 } else {
2104 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2105 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2106 path_status_desc[j].desc, path_type_desc[i].desc,
2107 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2108 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2109 } else if (cfg->cascaded_expander == 0xff) {
2110 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2111 "WWN=%08X%08X\n", path_status_desc[j].desc,
2112 path_type_desc[i].desc, cfg->phy,
2113 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2114 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2115 } else if (cfg->phy == 0xff) {
2116 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2117 "WWN=%08X%08X\n", path_status_desc[j].desc,
2118 path_type_desc[i].desc, cfg->cascaded_expander,
2119 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2120 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2121 } else {
2122 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2123 "WWN=%08X%08X\n", path_status_desc[j].desc,
2124 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2125 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2126 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2127 }
2128 }
2129 return;
2130 }
2131 }
2132
2133 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2134 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2135 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2136 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2137}
2138
2139/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002140 * ipr_log64_path_elem - Log a fabric path element.
2141 * @hostrcb: hostrcb struct
2142 * @cfg: fabric path element struct
2143 *
2144 * Return value:
2145 * none
2146 **/
2147static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2148 struct ipr_hostrcb64_config_element *cfg)
2149{
2150 int i, j;
2151 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2152 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2153 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2154 char buffer[IPR_MAX_RES_PATH_LENGTH];
2155
2156 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2157 return;
2158
2159 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2160 if (path_type_desc[i].type != type)
2161 continue;
2162
2163 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2164 if (path_status_desc[j].status != status)
2165 continue;
2166
2167 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2168 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002169 ipr_format_res_path(hostrcb->ioa_cfg,
2170 cfg->res_path, buffer, sizeof(buffer)),
2171 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2172 be32_to_cpu(cfg->wwid[0]),
2173 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002174 return;
2175 }
2176 }
2177 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2178 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002179 ipr_format_res_path(hostrcb->ioa_cfg,
2180 cfg->res_path, buffer, sizeof(buffer)),
2181 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2182 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002183}
2184
2185/**
Brian King49dc6a12006-11-21 10:28:35 -06002186 * ipr_log_fabric_error - Log a fabric error.
2187 * @ioa_cfg: ioa config struct
2188 * @hostrcb: hostrcb struct
2189 *
2190 * Return value:
2191 * none
2192 **/
2193static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2194 struct ipr_hostrcb *hostrcb)
2195{
2196 struct ipr_hostrcb_type_20_error *error;
2197 struct ipr_hostrcb_fabric_desc *fabric;
2198 struct ipr_hostrcb_config_element *cfg;
2199 int i, add_len;
2200
2201 error = &hostrcb->hcam.u.error.u.type_20_error;
2202 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2203 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2204
2205 add_len = be32_to_cpu(hostrcb->hcam.length) -
2206 (offsetof(struct ipr_hostrcb_error, u) +
2207 offsetof(struct ipr_hostrcb_type_20_error, desc));
2208
2209 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2210 ipr_log_fabric_path(hostrcb, fabric);
2211 for_each_fabric_cfg(fabric, cfg)
2212 ipr_log_path_elem(hostrcb, cfg);
2213
2214 add_len -= be16_to_cpu(fabric->length);
2215 fabric = (struct ipr_hostrcb_fabric_desc *)
2216 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2217 }
2218
Brian Kingac719ab2006-11-21 10:28:42 -06002219 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002220}
2221
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002222/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002223 * ipr_log_sis64_array_error - Log a sis64 array error.
2224 * @ioa_cfg: ioa config struct
2225 * @hostrcb: hostrcb struct
2226 *
2227 * Return value:
2228 * none
2229 **/
2230static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2231 struct ipr_hostrcb *hostrcb)
2232{
2233 int i, num_entries;
2234 struct ipr_hostrcb_type_24_error *error;
2235 struct ipr_hostrcb64_array_data_entry *array_entry;
2236 char buffer[IPR_MAX_RES_PATH_LENGTH];
2237 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2238
2239 error = &hostrcb->hcam.u.error64.u.type_24_error;
2240
2241 ipr_err_separator;
2242
2243 ipr_err("RAID %s Array Configuration: %s\n",
2244 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002245 ipr_format_res_path(ioa_cfg, error->last_res_path,
2246 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002247
2248 ipr_err_separator;
2249
2250 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002251 num_entries = min_t(u32, error->num_entries,
2252 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002253
2254 for (i = 0; i < num_entries; i++, array_entry++) {
2255
2256 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2257 continue;
2258
2259 if (error->exposed_mode_adn == i)
2260 ipr_err("Exposed Array Member %d:\n", i);
2261 else
2262 ipr_err("Array Member %d:\n", i);
2263
2265 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002266 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002267 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2268 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002269 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002270 ipr_format_res_path(ioa_cfg,
2271 array_entry->expected_res_path,
2272 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002273
2274 ipr_err_separator;
2275 }
2276}
2277
2278/**
2279 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2280 * @ioa_cfg: ioa config struct
2281 * @hostrcb: hostrcb struct
2282 *
2283 * Return value:
2284 * none
2285 **/
2286static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2287 struct ipr_hostrcb *hostrcb)
2288{
2289 struct ipr_hostrcb_type_30_error *error;
2290 struct ipr_hostrcb64_fabric_desc *fabric;
2291 struct ipr_hostrcb64_config_element *cfg;
2292 int i, add_len;
2293
2294 error = &hostrcb->hcam.u.error64.u.type_30_error;
2295
2296 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2297 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2298
2299 add_len = be32_to_cpu(hostrcb->hcam.length) -
2300 (offsetof(struct ipr_hostrcb64_error, u) +
2301 offsetof(struct ipr_hostrcb_type_30_error, desc));
2302
2303 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2304 ipr_log64_fabric_path(hostrcb, fabric);
2305 for_each_fabric_cfg(fabric, cfg)
2306 ipr_log64_path_elem(hostrcb, cfg);
2307
2308 add_len -= be16_to_cpu(fabric->length);
2309 fabric = (struct ipr_hostrcb64_fabric_desc *)
2310 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2311 }
2312
2313 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2314}
2315
2316/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 * ipr_log_generic_error - Log an adapter error.
2318 * @ioa_cfg: ioa config struct
2319 * @hostrcb: hostrcb struct
2320 *
2321 * Return value:
2322 * none
2323 **/
2324static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2325 struct ipr_hostrcb *hostrcb)
2326{
Brian Kingac719ab2006-11-21 10:28:42 -06002327 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002328 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329}
2330
2331/**
2332 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2333 * @ioasc: IOASC
2334 *
2335 * This function will return the index into the ipr_error_table
2336 * for the specified IOASC. If the IOASC is not in the table,
2337 * 0 will be returned, which points to the entry used for unknown errors.
2338 *
2339 * Return value:
2340 * index into the ipr_error_table
2341 **/
2342static u32 ipr_get_error(u32 ioasc)
2343{
2344 int i;
2345
2346 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002347 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 return i;
2349
2350 return 0;
2351}
2352
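/*
 * Note that ipr_get_error() masks the IOASC with IPR_IOASC_IOASC_MASK
 * before comparing, so IOASCs that differ only in the masked-off bits
 * share one ipr_error_table entry; callers then index the table
 * directly, e.g. ipr_error_table[ipr_get_error(ioasc)].log_hcam.
 */
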
2353/**
2354 * ipr_handle_log_data - Log an adapter error.
2355 * @ioa_cfg: ioa config struct
2356 * @hostrcb: hostrcb struct
2357 *
2358 * This function logs an adapter error to the system.
2359 *
2360 * Return value:
2361 * none
2362 **/
2363static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2364 struct ipr_hostrcb *hostrcb)
2365{
2366 u32 ioasc;
2367 int error_index;
2368
2369 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2370 return;
2371
2372 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2373 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2374
Wayne Boyer4565e372010-02-19 13:24:07 -08002375 if (ioa_cfg->sis64)
2376 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2377 else
2378 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379
Wayne Boyer4565e372010-02-19 13:24:07 -08002380 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2381 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2383 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002384 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 }
2386
2387 error_index = ipr_get_error(ioasc);
2388
2389 if (!ipr_error_table[error_index].log_hcam)
2390 return;
2391
Brian King49dc6a12006-11-21 10:28:35 -06002392 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393
2394 /* Set indication we have logged an error */
2395 ioa_cfg->errors_logged++;
2396
Brian King933916f2007-03-29 12:43:30 -05002397 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002399 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2400 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401
2402 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 case IPR_HOST_RCB_OVERLAY_ID_2:
2404 ipr_log_cache_error(ioa_cfg, hostrcb);
2405 break;
2406 case IPR_HOST_RCB_OVERLAY_ID_3:
2407 ipr_log_config_error(ioa_cfg, hostrcb);
2408 break;
2409 case IPR_HOST_RCB_OVERLAY_ID_4:
2410 case IPR_HOST_RCB_OVERLAY_ID_6:
2411 ipr_log_array_error(ioa_cfg, hostrcb);
2412 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002413 case IPR_HOST_RCB_OVERLAY_ID_7:
2414 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2415 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002416 case IPR_HOST_RCB_OVERLAY_ID_12:
2417 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2418 break;
2419 case IPR_HOST_RCB_OVERLAY_ID_13:
2420 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2421 break;
2422 case IPR_HOST_RCB_OVERLAY_ID_14:
2423 case IPR_HOST_RCB_OVERLAY_ID_16:
2424 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2425 break;
2426 case IPR_HOST_RCB_OVERLAY_ID_17:
2427 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2428 break;
Brian King49dc6a12006-11-21 10:28:35 -06002429 case IPR_HOST_RCB_OVERLAY_ID_20:
2430 ipr_log_fabric_error(ioa_cfg, hostrcb);
2431 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002432 case IPR_HOST_RCB_OVERLAY_ID_23:
2433 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2434 break;
2435 case IPR_HOST_RCB_OVERLAY_ID_24:
2436 case IPR_HOST_RCB_OVERLAY_ID_26:
2437 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2438 break;
2439 case IPR_HOST_RCB_OVERLAY_ID_30:
2440 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2441 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002442 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002445 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 break;
2447 }
2448}
2449
2450/**
2451 * ipr_process_error - Op done function for an adapter error log.
2452 * @ipr_cmd: ipr command struct
2453 *
2454 * This function is the op done function for an error log host
2455 * controlled async from the adapter. It will log the error and
2456 * send the HCAM back to the adapter.
2457 *
2458 * Return value:
2459 * none
2460 **/
2461static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2462{
2463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2464 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002465 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002466 u32 fd_ioasc;
2467
2468 if (ioa_cfg->sis64)
2469 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2470 else
2471 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
2473 list_del(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002474 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475
2476 if (!ioasc) {
2477 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002478 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2479 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2481 dev_err(&ioa_cfg->pdev->dev,
2482 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2483 }
2484
2485 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2486}
2487
2488/**
2489 * ipr_timeout - An internally generated op has timed out.
2490 * @ipr_cmd: ipr command struct
2491 *
2492 * This function blocks host requests and initiates an
2493 * adapter reset.
2494 *
2495 * Return value:
2496 * none
2497 **/
2498static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2499{
2500 unsigned long lock_flags = 0;
2501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2502
2503 ENTER;
2504 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2505
2506 ioa_cfg->errors_logged++;
2507 dev_err(&ioa_cfg->pdev->dev,
2508 "Adapter being reset due to command timeout.\n");
2509
2510 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2511 ioa_cfg->sdt_state = GET_DUMP;
2512
2513 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2514 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2515
2516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2517 LEAVE;
2518}
2519
2520/**
2521 * ipr_oper_timeout - Adapter timed out transitioning to operational
2522 * @ipr_cmd: ipr command struct
2523 *
2524 * This function blocks host requests and initiates an
2525 * adapter reset.
2526 *
2527 * Return value:
2528 * none
2529 **/
2530static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2531{
2532 unsigned long lock_flags = 0;
2533 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2534
2535 ENTER;
2536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2537
2538 ioa_cfg->errors_logged++;
2539 dev_err(&ioa_cfg->pdev->dev,
2540 "Adapter timed out transitioning to operational.\n");
2541
2542 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2543 ioa_cfg->sdt_state = GET_DUMP;
2544
2545 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2546 if (ipr_fastfail)
2547 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2548 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2549 }
2550
2551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2552 LEAVE;
2553}
2554
2555/**
2556 * ipr_reset_reload - Reset/Reload the IOA
2557 * @ioa_cfg: ioa config struct
2558 * @shutdown_type: shutdown type
2559 *
2560 * This function resets the adapter and re-initializes it.
2561 * This function assumes that all new host commands have been stopped.
2562 * Return value:
2563 * SUCCESS / FAILED
2564 **/
2565static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2566 enum ipr_shutdown_type shutdown_type)
2567{
2568 if (!ioa_cfg->in_reset_reload)
2569 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2570
2571 spin_unlock_irq(ioa_cfg->host->host_lock);
2572 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2573 spin_lock_irq(ioa_cfg->host->host_lock);
2574
2575 /* If we got hit with a host reset while we were already resetting
2576 the adapter for some reason, and the reset failed, report failure. */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06002577 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578 ipr_trace;
2579 return FAILED;
2580 }
2581
2582 return SUCCESS;
2583}
2584
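/*
 * ipr_reset_reload() expects to be entered with host->host_lock held;
 * the lock is released while the thread sleeps on reset_wait_q and
 * re-taken afterwards.  The SUCCESS/FAILED return values follow the
 * convention used by the SCSI midlayer's error-handling callbacks.
 */
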
2585/**
2586 * ipr_find_ses_entry - Find matching SES in SES table
2587 * @res: resource entry struct of SES
2588 *
2589 * Return value:
2590 * pointer to SES table entry / NULL on failure
2591 **/
2592static const struct ipr_ses_table_entry *
2593ipr_find_ses_entry(struct ipr_resource_entry *res)
2594{
2595 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002596 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2598
2599 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2600 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2601 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002602 vpids = &res->std_inq_data.vpids;
2603 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 matches++;
2605 else
2606 break;
2607 } else
2608 matches++;
2609 }
2610
2611 if (matches == IPR_PROD_ID_LEN)
2612 return ste;
2613 }
2614
2615 return NULL;
2616}
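
/*
 * A minimal, self-contained sketch (not ipr driver code) of the masked
 * compare performed by ipr_find_ses_entry() above: bytes flagged 'X' in
 * the compare mask must match the table entry exactly, while every other
 * position is treated as "don't care".  masked_id_match() and its
 * parameters are hypothetical names used only for illustration.
 */
static int masked_id_match(const char *id, const char *entry,
			   const char *mask, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (mask[i] == 'X' && id[i] != entry[i])
			return 0;	/* a significant byte differs */
	}
	return 1;			/* all significant bytes matched */
}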
2617
2618/**
2619 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2620 * @ioa_cfg: ioa config struct
2621 * @bus: SCSI bus
2622 * @bus_width: bus width
2623 *
2624 * Return value:
2625 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 2626 * For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec is
 2627 * twice the bus speed in MHz (e.g. for a wide enabled bus running at
 2628 * 160MHz, the maximum rate is 320MB/sec).
2629 **/
2630static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2631{
2632 struct ipr_resource_entry *res;
2633 const struct ipr_ses_table_entry *ste;
2634 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2635
2636 /* Loop through each config table entry in the config table buffer */
2637 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002638 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 continue;
2640
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002641 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 continue;
2643
2644 if (!(ste = ipr_find_ses_entry(res)))
2645 continue;
2646
2647 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2648 }
2649
2650 return max_xfer_rate;
2651}
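
/*
 * A standalone sketch (not ipr driver code) of the unit conversion done
 * above: a transfer-rate limit in MB/sec is converted to a bus clock in
 * units of 100 kHz for a given bus width in bits.  Each byte lane moves
 * one byte per clock, so rate_MBps = clock_MHz * (width_bits / 8), and
 * 1 MHz equals 10 units of 100 kHz.  mbps_to_100khz() is a hypothetical
 * helper, not part of the driver.
 */
static unsigned int mbps_to_100khz(unsigned int limit_mbps,
				   unsigned int bus_width_bits)
{
	return (limit_mbps * 10) / (bus_width_bits / 8);
}

/* Example: mbps_to_100khz(320, 16) == 1600, i.e. a wide bus at 160 MHz. */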
2652
2653/**
2654 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2655 * @ioa_cfg: ioa config struct
2656 * @max_delay: max delay in micro-seconds to wait
2657 *
2658 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2659 *
2660 * Return value:
2661 * 0 on success / other on failure
2662 **/
2663static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2664{
2665 volatile u32 pcii_reg;
2666 int delay = 1;
2667
2668 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2669 while (delay < max_delay) {
2670 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2671
2672 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2673 return 0;
2674
2675 /* udelay cannot be used if delay is more than a few milliseconds */
2676 if ((delay / 1000) > MAX_UDELAY_MS)
2677 mdelay(delay / 1000);
2678 else
2679 udelay(delay);
2680
2681 delay += delay;
2682 }
2683 return -EIO;
2684}
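
/*
 * A self-contained sketch (not ipr driver code) of the doubling-delay
 * poll loop used by ipr_wait_iodbg_ack() above: test a condition, wait
 * for the current delay, then double the delay until an overall budget
 * is exhausted.  poll_fn() and wait_us() are hypothetical stand-ins for
 * the register read and the udelay()/mdelay() calls.
 */
static int poll_with_backoff(int (*poll_fn)(void *arg), void *arg,
			     void (*wait_us)(unsigned int us),
			     unsigned int max_delay_us)
{
	unsigned int delay = 1;

	while (delay < max_delay_us) {
		if (poll_fn(arg))
			return 0;	/* condition met */
		wait_us(delay);
		delay += delay;		/* exponential backoff */
	}
	return -1;			/* timed out */
}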
2685
2686/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002687 * ipr_get_sis64_dump_data_section - Dump IOA memory
2688 * @ioa_cfg: ioa config struct
2689 * @start_addr: adapter address to dump
2690 * @dest: destination kernel buffer
2691 * @length_in_words: length to dump in 4 byte words
2692 *
2693 * Return value:
2694 * 0 on success
2695 **/
2696static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2697 u32 start_addr,
2698 __be32 *dest, u32 length_in_words)
2699{
2700 int i;
2701
2702 for (i = 0; i < length_in_words; i++) {
2703 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2704 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2705 dest++;
2706 }
2707
2708 return 0;
2709}
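
/*
 * A minimal sketch (not ipr driver code) of the indirect register window
 * used above: an address register selects which adapter word to expose
 * and a data register returns its value, four bytes at a time.
 * write_reg() and read_reg() are hypothetical accessors standing in for
 * the writel()/readl() calls on the dump address/data registers.
 */
static void window_read_words(void (*write_reg)(unsigned int addr),
			      unsigned int (*read_reg)(void),
			      unsigned int start_addr,
			      unsigned int *dest, unsigned int nwords)
{
	unsigned int i;

	for (i = 0; i < nwords; i++) {
		write_reg(start_addr + i * 4);	/* select the next word */
		dest[i] = read_reg();		/* latch its value */
	}
}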
2710
2711/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 * ipr_get_ldump_data_section - Dump IOA memory
2713 * @ioa_cfg: ioa config struct
2714 * @start_addr: adapter address to dump
2715 * @dest: destination kernel buffer
2716 * @length_in_words: length to dump in 4 byte words
2717 *
2718 * Return value:
2719 * 0 on success / -EIO on failure
2720 **/
2721static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2722 u32 start_addr,
2723 __be32 *dest, u32 length_in_words)
2724{
2725 volatile u32 temp_pcii_reg;
2726 int i, delay = 0;
2727
Wayne Boyerdcbad002010-02-19 13:24:14 -08002728 if (ioa_cfg->sis64)
2729 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2730 dest, length_in_words);
2731
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732 /* Write IOA interrupt reg starting LDUMP state */
2733 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002734 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735
2736 /* Wait for IO debug acknowledge */
2737 if (ipr_wait_iodbg_ack(ioa_cfg,
2738 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2739 dev_err(&ioa_cfg->pdev->dev,
2740 "IOA dump long data transfer timeout\n");
2741 return -EIO;
2742 }
2743
2744 /* Signal LDUMP interlocked - clear IO debug ack */
2745 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2746 ioa_cfg->regs.clr_interrupt_reg);
2747
2748 /* Write Mailbox with starting address */
2749 writel(start_addr, ioa_cfg->ioa_mailbox);
2750
2751 /* Signal address valid - clear IOA Reset alert */
2752 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002753 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754
2755 for (i = 0; i < length_in_words; i++) {
2756 /* Wait for IO debug acknowledge */
2757 if (ipr_wait_iodbg_ack(ioa_cfg,
2758 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2759 dev_err(&ioa_cfg->pdev->dev,
2760 "IOA dump short data transfer timeout\n");
2761 return -EIO;
2762 }
2763
2764 /* Read data from mailbox and increment destination pointer */
2765 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2766 dest++;
2767
2768 /* For all but the last word of data, signal data received */
2769 if (i < (length_in_words - 1)) {
2770 /* Signal dump data received - Clear IO debug Ack */
2771 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2772 ioa_cfg->regs.clr_interrupt_reg);
2773 }
2774 }
2775
2776 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2777 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002778 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779
2780 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002781 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782
2783 /* Signal dump data received - Clear IO debug Ack */
2784 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2785 ioa_cfg->regs.clr_interrupt_reg);
2786
2787 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2788 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2789 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002790 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791
2792 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2793 return 0;
2794
2795 udelay(10);
2796 delay += 10;
2797 }
2798
2799 return 0;
2800}
2801
2802#ifdef CONFIG_SCSI_IPR_DUMP
2803/**
2804 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2805 * @ioa_cfg: ioa config struct
2806 * @pci_address: adapter address
2807 * @length: length of data to copy
2808 *
2809 * Copy data from PCI adapter to kernel buffer.
2810 * Note: length MUST be a 4 byte multiple
2811 * Return value:
2812 * 0 on success / other on failure
2813 **/
2814static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2815 unsigned long pci_address, u32 length)
2816{
2817 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002818 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 __be32 *page;
2820 unsigned long lock_flags = 0;
2821 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2822
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002823 if (ioa_cfg->sis64)
2824 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2825 else
2826 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2827
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002829 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 if (ioa_dump->page_offset >= PAGE_SIZE ||
2831 ioa_dump->page_offset == 0) {
2832 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2833
2834 if (!page) {
2835 ipr_trace;
2836 return bytes_copied;
2837 }
2838
2839 ioa_dump->page_offset = 0;
2840 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2841 ioa_dump->next_page_index++;
2842 } else
2843 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2844
2845 rem_len = length - bytes_copied;
2846 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2847 cur_len = min(rem_len, rem_page_len);
2848
2849 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2850 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2851 rc = -EIO;
2852 } else {
2853 rc = ipr_get_ldump_data_section(ioa_cfg,
2854 pci_address + bytes_copied,
2855 &page[ioa_dump->page_offset / 4],
2856 (cur_len / sizeof(u32)));
2857 }
2858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2859
2860 if (!rc) {
2861 ioa_dump->page_offset += cur_len;
2862 bytes_copied += cur_len;
2863 } else {
2864 ipr_trace;
2865 break;
2866 }
2867 schedule();
2868 }
2869
2870 return bytes_copied;
2871}
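
/*
 * A standalone sketch (not ipr driver code) of the chunking math used in
 * ipr_sdt_copy() above: each pass copies at most the smaller of "bytes
 * left in the request" and "bytes left in the current page-sized
 * buffer".  next_chunk_len() is a hypothetical helper.
 */
static unsigned long next_chunk_len(unsigned long total_len,
				    unsigned long bytes_copied,
				    unsigned long page_size,
				    unsigned long page_offset)
{
	unsigned long rem_len = total_len - bytes_copied;
	unsigned long rem_page_len = page_size - page_offset;

	return rem_len < rem_page_len ? rem_len : rem_page_len;
}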
2872
2873/**
2874 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2875 * @hdr: dump entry header struct
2876 *
2877 * Return value:
2878 * nothing
2879 **/
2880static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2881{
2882 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2883 hdr->num_elems = 1;
2884 hdr->offset = sizeof(*hdr);
2885 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2886}
2887
2888/**
2889 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2890 * @ioa_cfg: ioa config struct
2891 * @driver_dump: driver dump struct
2892 *
2893 * Return value:
2894 * nothing
2895 **/
2896static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2897 struct ipr_driver_dump *driver_dump)
2898{
2899 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2900
2901 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2902 driver_dump->ioa_type_entry.hdr.len =
2903 sizeof(struct ipr_dump_ioa_type_entry) -
2904 sizeof(struct ipr_dump_entry_header);
2905 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2906 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2907 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2908 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2909 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2910 ucode_vpd->minor_release[1];
2911 driver_dump->hdr.num_entries++;
2912}
2913
2914/**
2915 * ipr_dump_version_data - Fill in the driver version in the dump.
2916 * @ioa_cfg: ioa config struct
2917 * @driver_dump: driver dump struct
2918 *
2919 * Return value:
2920 * nothing
2921 **/
2922static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2923 struct ipr_driver_dump *driver_dump)
2924{
2925 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2926 driver_dump->version_entry.hdr.len =
2927 sizeof(struct ipr_dump_version_entry) -
2928 sizeof(struct ipr_dump_entry_header);
2929 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2930 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2931 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2932 driver_dump->hdr.num_entries++;
2933}
2934
2935/**
2936 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2937 * @ioa_cfg: ioa config struct
2938 * @driver_dump: driver dump struct
2939 *
2940 * Return value:
2941 * nothing
2942 **/
2943static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2944 struct ipr_driver_dump *driver_dump)
2945{
2946 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2947 driver_dump->trace_entry.hdr.len =
2948 sizeof(struct ipr_dump_trace_entry) -
2949 sizeof(struct ipr_dump_entry_header);
2950 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2951 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2952 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2953 driver_dump->hdr.num_entries++;
2954}
2955
2956/**
2957 * ipr_dump_location_data - Fill in the IOA location in the dump.
2958 * @ioa_cfg: ioa config struct
2959 * @driver_dump: driver dump struct
2960 *
2961 * Return value:
2962 * nothing
2963 **/
2964static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2965 struct ipr_driver_dump *driver_dump)
2966{
2967 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2968 driver_dump->location_entry.hdr.len =
2969 sizeof(struct ipr_dump_location_entry) -
2970 sizeof(struct ipr_dump_entry_header);
2971 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2972 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01002973 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 driver_dump->hdr.num_entries++;
2975}
2976
2977/**
2978 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2979 * @ioa_cfg: ioa config struct
2980 * @dump: dump struct
2981 *
2982 * Return value:
2983 * nothing
2984 **/
2985static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2986{
2987 unsigned long start_addr, sdt_word;
2988 unsigned long lock_flags = 0;
2989 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2990 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002991 u32 num_entries, max_num_entries, start_off, end_off;
2992 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08002994 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995 int i;
2996
2997 ENTER;
2998
2999 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3000
Brian King41e9a692011-09-21 08:51:11 -05003001 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3003 return;
3004 }
3005
Wayne Boyer110def82010-11-04 09:36:16 -07003006 if (ioa_cfg->sis64) {
3007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3008 ssleep(IPR_DUMP_DELAY_SECONDS);
3009 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3010 }
3011
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 start_addr = readl(ioa_cfg->ioa_mailbox);
3013
Wayne Boyerdcbad002010-02-19 13:24:14 -08003014 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015 dev_err(&ioa_cfg->pdev->dev,
3016 "Invalid dump table format: %lx\n", start_addr);
3017 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3018 return;
3019 }
3020
3021 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3022
3023 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3024
3025 /* Initialize the overall dump header */
3026 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3027 driver_dump->hdr.num_entries = 1;
3028 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3029 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3030 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3031 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3032
3033 ipr_dump_version_data(ioa_cfg, driver_dump);
3034 ipr_dump_location_data(ioa_cfg, driver_dump);
3035 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3036 ipr_dump_trace_data(ioa_cfg, driver_dump);
3037
3038 /* Update dump_header */
3039 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3040
3041 /* IOA Dump entry */
3042 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 ioa_dump->hdr.len = 0;
3044 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3045 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3046
3047 /* First entries in sdt are actually a list of dump addresses and
3048 lengths to gather the real dump data. sdt represents the pointer
3049 to the ioa generated dump table. Dump data will be extracted based
3050 on entries in this table */
3051 sdt = &ioa_dump->sdt;
3052
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003053 if (ioa_cfg->sis64) {
3054 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3055 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3056 } else {
3057 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3058 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3059 }
3060
3061 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3062 (max_num_entries * sizeof(struct ipr_sdt_entry));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003064 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065
3066 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003067 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3068 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069 dev_err(&ioa_cfg->pdev->dev,
3070 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3071 rc, be32_to_cpu(sdt->hdr.state));
3072 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3073 ioa_cfg->sdt_state = DUMP_OBTAINED;
3074 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3075 return;
3076 }
3077
3078 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3079
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003080 if (num_entries > max_num_entries)
3081 num_entries = max_num_entries;
3082
3083 /* Update dump length to the actual data to be copied */
3084 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3085 if (ioa_cfg->sis64)
3086 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3087 else
3088 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089
3090 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3091
3092 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003093 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3095 break;
3096 }
3097
3098 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003099 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3100 if (ioa_cfg->sis64)
3101 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3102 else {
3103 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3104 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105
Wayne Boyerdcbad002010-02-19 13:24:14 -08003106 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3107 bytes_to_copy = end_off - start_off;
3108 else
3109 valid = 0;
3110 }
3111 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003112 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3114 continue;
3115 }
3116
3117 /* Copy data from adapter to driver buffers */
3118 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3119 bytes_to_copy);
3120
3121 ioa_dump->hdr.len += bytes_copied;
3122
3123 if (bytes_copied != bytes_to_copy) {
3124 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3125 break;
3126 }
3127 }
3128 }
3129 }
3130
3131 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3132
3133 /* Update dump_header */
3134 driver_dump->hdr.len += ioa_dump->hdr.len;
3135 wmb();
3136 ioa_cfg->sdt_state = DUMP_OBTAINED;
3137 LEAVE;
3138}
3139
3140#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003141#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142#endif
3143
3144/**
3145 * ipr_release_dump - Free adapter dump memory
3146 * @kref: kref struct
3147 *
3148 * Return value:
3149 * nothing
3150 **/
3151static void ipr_release_dump(struct kref *kref)
3152{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003153 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3155 unsigned long lock_flags = 0;
3156 int i;
3157
3158 ENTER;
3159 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3160 ioa_cfg->dump = NULL;
3161 ioa_cfg->sdt_state = INACTIVE;
3162 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3163
3164 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3165 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3166
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003167 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 kfree(dump);
3169 LEAVE;
3170}
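
/*
 * A minimal sketch (assuming only the standard <linux/kref.h> and
 * <linux/slab.h> APIs) of the reference-counting pattern used for the
 * dump object: the final kref_put() invokes the release callback, which
 * frees the object.  struct foo and foo_release() are hypothetical.
 */
struct foo {
	struct kref kref;
	/* ... payload ... */
};

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, kref);

	kfree(f);
}

/*
 * Usage:
 *	kref_init(&f->kref);			creator holds one reference
 *	kref_get(&f->kref);			a second user takes another
 *	kref_put(&f->kref, foo_release);	drops one; frees at zero
 */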
3171
3172/**
3173 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003174 * @work: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175 *
3176 * Called at task level from a work thread. This function takes care
3177 * of adding and removing device from the mid-layer as configuration
3178 * changes are detected by the adapter.
3179 *
3180 * Return value:
3181 * nothing
3182 **/
David Howellsc4028952006-11-22 14:57:56 +00003183static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184{
3185 unsigned long lock_flags;
3186 struct ipr_resource_entry *res;
3187 struct scsi_device *sdev;
3188 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003189 struct ipr_ioa_cfg *ioa_cfg =
3190 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191 u8 bus, target, lun;
3192 int did_work;
3193
3194 ENTER;
3195 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3196
Brian King41e9a692011-09-21 08:51:11 -05003197 if (ioa_cfg->sdt_state == READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198 dump = ioa_cfg->dump;
3199 if (!dump) {
3200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3201 return;
3202 }
3203 kref_get(&dump->kref);
3204 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3205 ipr_get_ioa_dump(ioa_cfg, dump);
3206 kref_put(&dump->kref, ipr_release_dump);
3207
3208 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King4c647e92011-10-15 09:08:56 -05003209 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3211 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3212 return;
3213 }
3214
3215restart:
3216 do {
3217 did_work = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003218 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3219 !ioa_cfg->allow_ml_add_del) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3221 return;
3222 }
3223
3224 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3225 if (res->del_from_ml && res->sdev) {
3226 did_work = 1;
3227 sdev = res->sdev;
3228 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003229 if (!res->add_to_ml)
3230 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3231 else
3232 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3234 scsi_remove_device(sdev);
3235 scsi_device_put(sdev);
3236 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3237 }
3238 break;
3239 }
3240 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003241 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242
3243 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3244 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003245 bus = res->bus;
3246 target = res->target;
3247 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003248 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3250 scsi_add_device(ioa_cfg->host, bus, target, lun);
3251 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3252 goto restart;
3253 }
3254 }
3255
3256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003257 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258 LEAVE;
3259}
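
/*
 * A minimal sketch (assuming only the standard <linux/workqueue.h> API)
 * of how task-level work such as ipr_worker_thread() is typically wired
 * up: the work_struct is embedded in the per-adapter structure,
 * initialized once, and queued whenever deferred processing is needed.
 * struct foo_cfg and foo_worker() are hypothetical.
 */
struct foo_cfg {
	struct work_struct work_q;
	/* ... adapter state ... */
};

static void foo_worker(struct work_struct *work)
{
	struct foo_cfg *cfg = container_of(work, struct foo_cfg, work_q);

	/* task-level processing on cfg goes here */
}

/*
 * Setup and use:
 *	INIT_WORK(&cfg->work_q, foo_worker);
 *	schedule_work(&cfg->work_q);	safe even from interrupt context
 */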
3260
3261#ifdef CONFIG_SCSI_IPR_TRACE
3262/**
3263 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003264 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003266 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 * @buf: buffer
3268 * @off: offset
3269 * @count: buffer size
3270 *
3271 * Return value:
3272 * number of bytes printed to buffer
3273 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003274static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003275 struct bin_attribute *bin_attr,
3276 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277{
Tony Jonesee959b02008-02-22 00:13:36 +01003278 struct device *dev = container_of(kobj, struct device, kobj);
3279 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3281 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003282 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283
3284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003285 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3286 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003288
3289 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290}
3291
3292static struct bin_attribute ipr_trace_attr = {
3293 .attr = {
3294 .name = "trace",
3295 .mode = S_IRUGO,
3296 },
3297 .size = 0,
3298 .read = ipr_read_trace,
3299};
3300#endif
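
/*
 * A minimal sketch (assuming only the standard sysfs binary-attribute
 * API and memory_read_from_buffer()) of the read pattern used for the
 * trace file above: memory_read_from_buffer() clamps the requested
 * offset and length when exposing an in-memory buffer through sysfs.
 * foo_buf, foo_read() and foo_attr are hypothetical.
 */
static char foo_buf[256];

static ssize_t foo_read(struct file *filp, struct kobject *kobj,
			struct bin_attribute *bin_attr,
			char *buf, loff_t off, size_t count)
{
	return memory_read_from_buffer(buf, count, &off,
				       foo_buf, sizeof(foo_buf));
}

static struct bin_attribute foo_attr = {
	.attr = {
		.name = "foo",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = foo_read,
};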
3301
3302/**
3303 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003304 * @dev: class device struct
3305 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306 *
3307 * Return value:
3308 * number of bytes printed to buffer
3309 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003310static ssize_t ipr_show_fw_version(struct device *dev,
3311 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003312{
Tony Jonesee959b02008-02-22 00:13:36 +01003313 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003314 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3315 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3316 unsigned long lock_flags = 0;
3317 int len;
3318
3319 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3320 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3321 ucode_vpd->major_release, ucode_vpd->card_type,
3322 ucode_vpd->minor_release[0],
3323 ucode_vpd->minor_release[1]);
3324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3325 return len;
3326}
3327
Tony Jonesee959b02008-02-22 00:13:36 +01003328static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329 .attr = {
3330 .name = "fw_version",
3331 .mode = S_IRUGO,
3332 },
3333 .show = ipr_show_fw_version,
3334};
3335
3336/**
3337 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003338 * @dev: class device struct
3339 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340 *
3341 * Return value:
3342 * number of bytes printed to buffer
3343 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003344static ssize_t ipr_show_log_level(struct device *dev,
3345 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346{
Tony Jonesee959b02008-02-22 00:13:36 +01003347 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3349 unsigned long lock_flags = 0;
3350 int len;
3351
3352 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3353 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3355 return len;
3356}
3357
3358/**
3359 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003360 * @dev: class device struct
3361 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 *
3363 * Return value:
 3364 * number of bytes consumed from the buffer
3365 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003366static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003367 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 const char *buf, size_t count)
3369{
Tony Jonesee959b02008-02-22 00:13:36 +01003370 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3372 unsigned long lock_flags = 0;
3373
3374 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3375 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3376 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377 return strlen(buf);
3378}
3379
Tony Jonesee959b02008-02-22 00:13:36 +01003380static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381 .attr = {
3382 .name = "log_level",
3383 .mode = S_IRUGO | S_IWUSR,
3384 },
3385 .show = ipr_show_log_level,
3386 .store = ipr_store_log_level
3387};
3388
3389/**
3390 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003391 * @dev: device struct
3392 * @buf: buffer
3393 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 *
3395 * This function will reset the adapter and wait a reasonable
3396 * amount of time for any errors that the adapter might log.
3397 *
3398 * Return value:
3399 * count on success / other on failure
3400 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003401static ssize_t ipr_store_diagnostics(struct device *dev,
3402 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 const char *buf, size_t count)
3404{
Tony Jonesee959b02008-02-22 00:13:36 +01003405 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3407 unsigned long lock_flags = 0;
3408 int rc = count;
3409
3410 if (!capable(CAP_SYS_ADMIN))
3411 return -EACCES;
3412
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003414 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003415 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3416 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3417 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3418 }
3419
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420 ioa_cfg->errors_logged = 0;
3421 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3422
3423 if (ioa_cfg->in_reset_reload) {
3424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3425 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3426
3427 /* Wait for a second for any errors to be logged */
3428 msleep(1000);
3429 } else {
3430 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3431 return -EIO;
3432 }
3433
3434 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3435 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3436 rc = -EIO;
3437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3438
3439 return rc;
3440}
3441
Tony Jonesee959b02008-02-22 00:13:36 +01003442static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443 .attr = {
3444 .name = "run_diagnostics",
3445 .mode = S_IWUSR,
3446 },
3447 .store = ipr_store_diagnostics
3448};
3449
3450/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003451 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003452 * @dev: device struct
3453 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003454 *
3455 * Return value:
3456 * number of bytes printed to buffer
3457 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003458static ssize_t ipr_show_adapter_state(struct device *dev,
3459 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003460{
Tony Jonesee959b02008-02-22 00:13:36 +01003461 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463 unsigned long lock_flags = 0;
3464 int len;
3465
3466 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003467 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003468 len = snprintf(buf, PAGE_SIZE, "offline\n");
3469 else
3470 len = snprintf(buf, PAGE_SIZE, "online\n");
3471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3472 return len;
3473}
3474
3475/**
3476 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003477 * @dev: device struct
3478 * @buf: buffer
3479 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003480 *
3481 * This function will change the adapter's state.
3482 *
3483 * Return value:
3484 * count on success / other on failure
3485 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003486static ssize_t ipr_store_adapter_state(struct device *dev,
3487 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003488 const char *buf, size_t count)
3489{
Tony Jonesee959b02008-02-22 00:13:36 +01003490 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003491 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3492 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003493 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003494
3495 if (!capable(CAP_SYS_ADMIN))
3496 return -EACCES;
3497
3498 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003499 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3500 !strncmp(buf, "online", 6)) {
3501 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3502 spin_lock(&ioa_cfg->hrrq[i]._lock);
3503 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3504 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3505 }
3506 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003507 ioa_cfg->reset_retries = 0;
3508 ioa_cfg->in_ioa_bringdown = 0;
3509 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3510 }
3511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3512 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3513
3514 return result;
3515}
3516
Tony Jonesee959b02008-02-22 00:13:36 +01003517static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003518 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003519 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003520 .mode = S_IRUGO | S_IWUSR,
3521 },
3522 .show = ipr_show_adapter_state,
3523 .store = ipr_store_adapter_state
3524};
3525
3526/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003528 * @dev: device struct
3529 * @buf: buffer
3530 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531 *
3532 * This function will reset the adapter.
3533 *
3534 * Return value:
3535 * count on success / other on failure
3536 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003537static ssize_t ipr_store_reset_adapter(struct device *dev,
3538 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539 const char *buf, size_t count)
3540{
Tony Jonesee959b02008-02-22 00:13:36 +01003541 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3543 unsigned long lock_flags;
3544 int result = count;
3545
3546 if (!capable(CAP_SYS_ADMIN))
3547 return -EACCES;
3548
3549 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3550 if (!ioa_cfg->in_reset_reload)
3551 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3552 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3553 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3554
3555 return result;
3556}
3557
Tony Jonesee959b02008-02-22 00:13:36 +01003558static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003559 .attr = {
3560 .name = "reset_host",
3561 .mode = S_IWUSR,
3562 },
3563 .store = ipr_store_reset_adapter
3564};
3565
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003566static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3567 /**
3568 * ipr_show_iopoll_weight - Show ipr polling mode
3569 * @dev: class device struct
3570 * @buf: buffer
3571 *
3572 * Return value:
3573 * number of bytes printed to buffer
3574 **/
3575static ssize_t ipr_show_iopoll_weight(struct device *dev,
3576 struct device_attribute *attr, char *buf)
3577{
3578 struct Scsi_Host *shost = class_to_shost(dev);
3579 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3580 unsigned long lock_flags = 0;
3581 int len;
3582
3583 spin_lock_irqsave(shost->host_lock, lock_flags);
3584 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3585 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3586
3587 return len;
3588}
3589
3590/**
3591 * ipr_store_iopoll_weight - Change the adapter's polling mode
3592 * @dev: class device struct
3593 * @buf: buffer
3594 *
3595 * Return value:
 3596 * number of bytes consumed from the buffer
3597 **/
3598static ssize_t ipr_store_iopoll_weight(struct device *dev,
3599 struct device_attribute *attr,
3600 const char *buf, size_t count)
3601{
3602 struct Scsi_Host *shost = class_to_shost(dev);
3603 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3604 unsigned long user_iopoll_weight;
3605 unsigned long lock_flags = 0;
3606 int i;
3607
3608 if (!ioa_cfg->sis64) {
3609 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3610 return -EINVAL;
3611 }
3612 if (kstrtoul(buf, 10, &user_iopoll_weight))
3613 return -EINVAL;
3614
3615 if (user_iopoll_weight > 256) {
 3616		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not be larger than 256\n");
3617 return -EINVAL;
3618 }
3619
3620 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
 3621		dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight already matches the requested value\n");
3622 return strlen(buf);
3623 }
3624
3625 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3626 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3627 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3628 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3629 }
3630
3631 spin_lock_irqsave(shost->host_lock, lock_flags);
3632 ioa_cfg->iopoll_weight = user_iopoll_weight;
3633 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3634 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3635 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3636 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3637 ioa_cfg->iopoll_weight, ipr_iopoll);
3638 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3639 }
3640 }
3641 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3642
3643 return strlen(buf);
3644}
3645
3646static struct device_attribute ipr_iopoll_weight_attr = {
3647 .attr = {
3648 .name = "iopoll_weight",
3649 .mode = S_IRUGO | S_IWUSR,
3650 },
3651 .show = ipr_show_iopoll_weight,
3652 .store = ipr_store_iopoll_weight
3653};
3654
Linus Torvalds1da177e2005-04-16 15:20:36 -07003655/**
3656 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3657 * @buf_len: buffer length
3658 *
3659 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3660 * list to use for microcode download
3661 *
3662 * Return value:
3663 * pointer to sglist / NULL on failure
3664 **/
3665static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3666{
3667 int sg_size, order, bsize_elem, num_elem, i, j;
3668 struct ipr_sglist *sglist;
3669 struct scatterlist *scatterlist;
3670 struct page *page;
3671
3672 /* Get the minimum size per scatter/gather element */
3673 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3674
3675 /* Get the actual size per element */
3676 order = get_order(sg_size);
3677
3678 /* Determine the actual number of bytes per element */
3679 bsize_elem = PAGE_SIZE * (1 << order);
3680
3681 /* Determine the actual number of sg entries needed */
3682 if (buf_len % bsize_elem)
3683 num_elem = (buf_len / bsize_elem) + 1;
3684 else
3685 num_elem = buf_len / bsize_elem;
3686
3687 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003688 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689 (sizeof(struct scatterlist) * (num_elem - 1)),
3690 GFP_KERNEL);
3691
3692 if (sglist == NULL) {
3693 ipr_trace;
3694 return NULL;
3695 }
3696
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697 scatterlist = sglist->scatterlist;
Jens Axboe45711f12007-10-22 21:19:53 +02003698 sg_init_table(scatterlist, num_elem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699
3700 sglist->order = order;
3701 sglist->num_sg = num_elem;
3702
3703 /* Allocate a bunch of sg elements */
3704 for (i = 0; i < num_elem; i++) {
3705 page = alloc_pages(GFP_KERNEL, order);
3706 if (!page) {
3707 ipr_trace;
3708
3709 /* Free up what we already allocated */
3710 for (j = i - 1; j >= 0; j--)
Jens Axboe45711f12007-10-22 21:19:53 +02003711 __free_pages(sg_page(&scatterlist[j]), order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712 kfree(sglist);
3713 return NULL;
3714 }
3715
Jens Axboe642f1492007-10-24 11:20:47 +02003716 sg_set_page(&scatterlist[i], page, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 }
3718
3719 return sglist;
3720}
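
/*
 * A standalone sketch (not ipr driver code) of the sizing math used in
 * ipr_alloc_ucode_buffer() above: the image is spread over at most
 * max_sg scatter/gather elements, each element is rounded up to a
 * power-of-two number of pages, and the element count is the image
 * length divided by that element size, rounded up.  EXAMPLE_PAGE_SIZE
 * and the helpers below are hypothetical simplifications of PAGE_SIZE
 * and get_order().
 */
#define EXAMPLE_PAGE_SIZE 4096u

static unsigned int example_order_for(unsigned int size)
{
	unsigned int order = 0;

	while ((EXAMPLE_PAGE_SIZE << order) < size)
		order++;
	return order;
}

static unsigned int example_sg_elem_count(unsigned int buf_len,
					  unsigned int max_sg)
{
	unsigned int sg_size = buf_len / (max_sg - 1);
	unsigned int bsize_elem = EXAMPLE_PAGE_SIZE << example_order_for(sg_size);

	return (buf_len + bsize_elem - 1) / bsize_elem;	/* round up */
}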
3721
3722/**
3723 * ipr_free_ucode_buffer - Frees a microcode download buffer
 3724 * @sglist: scatter/gather list pointer
3725 *
3726 * Free a DMA'able ucode download buffer previously allocated with
3727 * ipr_alloc_ucode_buffer
3728 *
3729 * Return value:
3730 * nothing
3731 **/
3732static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3733{
3734 int i;
3735
3736 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003737 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738
3739 kfree(sglist);
3740}
3741
3742/**
3743 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3744 * @sglist: scatter/gather list pointer
3745 * @buffer: buffer pointer
3746 * @len: buffer length
3747 *
3748 * Copy a microcode image from a user buffer into a buffer allocated by
3749 * ipr_alloc_ucode_buffer
3750 *
3751 * Return value:
3752 * 0 on success / other on failure
3753 **/
3754static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3755 u8 *buffer, u32 len)
3756{
3757 int bsize_elem, i, result = 0;
3758 struct scatterlist *scatterlist;
3759 void *kaddr;
3760
3761 /* Determine the actual number of bytes per element */
3762 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3763
3764 scatterlist = sglist->scatterlist;
3765
3766 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003767 struct page *page = sg_page(&scatterlist[i]);
3768
3769 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003771 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003772
3773 scatterlist[i].length = bsize_elem;
3774
3775 if (result != 0) {
3776 ipr_trace;
3777 return result;
3778 }
3779 }
3780
3781 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003782 struct page *page = sg_page(&scatterlist[i]);
3783
3784 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003786 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787
3788 scatterlist[i].length = len % bsize_elem;
3789 }
3790
3791 sglist->buffer_len = len;
3792 return result;
3793}
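
/*
 * A minimal sketch (assuming only the standard kmap()/kunmap() API) of
 * the per-page copy step used in ipr_copy_ucode_buffer() above: each
 * scatterlist page is temporarily mapped, filled from the source
 * buffer, then unmapped again.  copy_into_page() is a hypothetical
 * helper.
 */
static void copy_into_page(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap(page);	/* map the page into kernel space */

	memcpy(kaddr, src, len);
	kunmap(page);			/* drop the temporary mapping */
}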
3794
3795/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003796 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3797 * @ipr_cmd: ipr command struct
3798 * @sglist: scatter/gather list
3799 *
3800 * Builds a microcode download IOA data list (IOADL).
3801 *
3802 **/
3803static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3804 struct ipr_sglist *sglist)
3805{
3806 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3807 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3808 struct scatterlist *scatterlist = sglist->scatterlist;
3809 int i;
3810
3811 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3812 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3813 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3814
3815 ioarcb->ioadl_len =
3816 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3817 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3818 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3819 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3820 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3821 }
3822
3823 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3824}
3825
3826/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003827 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828 * @ipr_cmd: ipr command struct
3829 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003831 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003834static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3835 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003838 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 struct scatterlist *scatterlist = sglist->scatterlist;
3840 int i;
3841
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003842 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003843 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003844 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3845
3846 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3848
3849 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3850 ioadl[i].flags_and_data_len =
3851 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3852 ioadl[i].address =
3853 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3854 }
3855
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003856 ioadl[i-1].flags_and_data_len |=
3857 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3858}
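
/*
 * A standalone sketch (not ipr driver code) of the descriptor-building
 * pattern shared by the two IOADL helpers above: one descriptor per
 * DMA-mapped element, with a "last" flag ORed into the final entry so
 * the adapter knows where the list ends.  struct example_desc and the
 * flag values are hypothetical.
 */
#define EXAMPLE_DESC_WRITE	0x1u
#define EXAMPLE_DESC_LAST	0x2u

struct example_desc {
	unsigned int flags;
	unsigned int len;
	unsigned long long addr;
};

static void example_build_desc_list(struct example_desc *d,
				    const unsigned long long *addr,
				    const unsigned int *len,
				    unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		d[i].flags = EXAMPLE_DESC_WRITE;
		d[i].len = len[i];
		d[i].addr = addr[i];
	}
	d[n - 1].flags |= EXAMPLE_DESC_LAST;	/* terminate the list */
}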
3859
3860/**
3861 * ipr_update_ioa_ucode - Update IOA's microcode
3862 * @ioa_cfg: ioa config struct
3863 * @sglist: scatter/gather list
3864 *
3865 * Initiate an adapter reset to update the IOA's microcode
3866 *
3867 * Return value:
3868 * 0 on success / -EIO on failure
3869 **/
3870static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3871 struct ipr_sglist *sglist)
3872{
3873 unsigned long lock_flags;
3874
3875 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003876 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003877 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3878 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3879 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3880 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003881
3882 if (ioa_cfg->ucode_sglist) {
3883 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3884 dev_err(&ioa_cfg->pdev->dev,
3885 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 return -EIO;
3887 }
3888
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003889 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3890 sglist->num_sg, DMA_TO_DEVICE);
3891
3892 if (!sglist->num_dma_sg) {
3893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3894 dev_err(&ioa_cfg->pdev->dev,
3895 "Failed to map microcode download buffer!\n");
3896 return -EIO;
3897 }
3898
3899 ioa_cfg->ucode_sglist = sglist;
3900 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3901 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3902 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3903
3904 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3905 ioa_cfg->ucode_sglist = NULL;
3906 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907 return 0;
3908}
3909
3910/**
3911 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003912 * @dev: device struct
3913 * @buf: buffer
3914 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915 *
3916 * This function will update the firmware on the adapter.
3917 *
3918 * Return value:
3919 * count on success / other on failure
3920 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003921static ssize_t ipr_store_update_fw(struct device *dev,
3922 struct device_attribute *attr,
3923 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924{
Tony Jonesee959b02008-02-22 00:13:36 +01003925 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3927 struct ipr_ucode_image_header *image_hdr;
3928 const struct firmware *fw_entry;
3929 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930 char fname[100];
3931 char *src;
3932 int len, result, dnld_size;
3933
3934 if (!capable(CAP_SYS_ADMIN))
3935 return -EACCES;
3936
3937 len = snprintf(fname, 99, "%s", buf);
3938 fname[len-1] = '\0';
3939
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003940 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3942 return -EIO;
3943 }
3944
3945 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3946
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3948 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3949 sglist = ipr_alloc_ucode_buffer(dnld_size);
3950
3951 if (!sglist) {
3952 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3953 release_firmware(fw_entry);
3954 return -ENOMEM;
3955 }
3956
3957 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3958
3959 if (result) {
3960 dev_err(&ioa_cfg->pdev->dev,
3961 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003962 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963 }
3964
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07003965 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3966
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003967 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003969 if (!result)
3970 result = count;
3971out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972 ipr_free_ucode_buffer(sglist);
3973 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003974 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975}
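
/*
 * A minimal sketch (assuming only the standard <linux/firmware.h> API)
 * of the firmware-loading pattern used in ipr_store_update_fw() above:
 * request_firmware() fetches the named image through the firmware
 * loader, the caller consumes fw->data/fw->size, and release_firmware()
 * frees it.  "example.bin" and example_load_fw() are hypothetical.
 */
static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, "example.bin", dev);
	if (rc)
		return rc;		/* image not found */

	/* ... copy fw->data (fw->size bytes) somewhere useful ... */

	release_firmware(fw);
	return 0;
}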
3976
Tony Jonesee959b02008-02-22 00:13:36 +01003977static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 .attr = {
3979 .name = "update_fw",
3980 .mode = S_IWUSR,
3981 },
3982 .store = ipr_store_update_fw
3983};
3984
Wayne Boyer75576bb2010-07-14 10:50:14 -07003985/**
3986 * ipr_show_fw_type - Show the adapter's firmware type.
3987 * @dev: class device struct
3988 * @buf: buffer
3989 *
3990 * Return value:
3991 * number of bytes printed to buffer
3992 **/
3993static ssize_t ipr_show_fw_type(struct device *dev,
3994 struct device_attribute *attr, char *buf)
3995{
3996 struct Scsi_Host *shost = class_to_shost(dev);
3997 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3998 unsigned long lock_flags = 0;
3999 int len;
4000
4001 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4002 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4003 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4004 return len;
4005}
4006
4007static struct device_attribute ipr_ioa_fw_type_attr = {
4008 .attr = {
4009 .name = "fw_type",
4010 .mode = S_IRUGO,
4011 },
4012 .show = ipr_show_fw_type
4013};
4014
Tony Jonesee959b02008-02-22 00:13:36 +01004015static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016 &ipr_fw_version_attr,
4017 &ipr_log_level_attr,
4018 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06004019 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004020 &ipr_ioa_reset_attr,
4021 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004022 &ipr_ioa_fw_type_attr,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06004023 &ipr_iopoll_weight_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024 NULL,
4025};
4026
4027#ifdef CONFIG_SCSI_IPR_DUMP
4028/**
4029 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004030 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004032 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 * @buf: buffer
4034 * @off: offset
4035 * @count: buffer size
4036 *
4037 * Return value:
4038 * number of bytes printed to buffer
4039 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004040static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004041 struct bin_attribute *bin_attr,
4042 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043{
Tony Jonesee959b02008-02-22 00:13:36 +01004044 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004045 struct Scsi_Host *shost = class_to_shost(cdev);
4046 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4047 struct ipr_dump *dump;
4048 unsigned long lock_flags = 0;
4049 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004050 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051 size_t rc = count;
4052
4053 if (!capable(CAP_SYS_ADMIN))
4054 return -EACCES;
4055
4056 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4057 dump = ioa_cfg->dump;
4058
4059 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4060 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4061 return 0;
4062 }
4063 kref_get(&dump->kref);
4064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4065
4066 if (off > dump->driver_dump.hdr.len) {
4067 kref_put(&dump->kref, ipr_release_dump);
4068 return 0;
4069 }
4070
4071 if (off + count > dump->driver_dump.hdr.len) {
4072 count = dump->driver_dump.hdr.len - off;
4073 rc = count;
4074 }
4075
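	/*
	 * The dump is laid out as three consecutive regions: the driver dump
	 * header structures, the SDT, and then the IOA data pages.  Copy
	 * whatever portion of the driver dump region this read covers first.
	 */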
4076 if (count && off < sizeof(dump->driver_dump)) {
4077 if (off + count > sizeof(dump->driver_dump))
4078 len = sizeof(dump->driver_dump) - off;
4079 else
4080 len = count;
4081 src = (u8 *)&dump->driver_dump + off;
4082 memcpy(buf, src, len);
4083 buf += len;
4084 off += len;
4085 count -= len;
4086 }
4087
4088 off -= sizeof(dump->driver_dump);
4089
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004090 if (ioa_cfg->sis64)
4091 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4092 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4093 sizeof(struct ipr_sdt_entry));
4094 else
4095 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4096 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4097
4098 if (count && off < sdt_end) {
4099 if (off + count > sdt_end)
4100 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101 else
4102 len = count;
4103 src = (u8 *)&dump->ioa_dump + off;
4104 memcpy(buf, src, len);
4105 buf += len;
4106 off += len;
4107 count -= len;
4108 }
4109
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004110 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111
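	/*
	 * Whatever remains is IOA data, held in an array of individually
	 * allocated pages, so copy it out a page (or partial page) at a time.
	 */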
4112 while (count) {
4113 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4114 len = PAGE_ALIGN(off) - off;
4115 else
4116 len = count;
4117 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4118 src += off & ~PAGE_MASK;
4119 memcpy(buf, src, len);
4120 buf += len;
4121 off += len;
4122 count -= len;
4123 }
4124
4125 kref_put(&dump->kref, ipr_release_dump);
4126 return rc;
4127}
4128
4129/**
4130 * ipr_alloc_dump - Prepare for adapter dump
4131 * @ioa_cfg: ioa config struct
4132 *
4133 * Return value:
4134 * 0 on success / other on failure
4135 **/
4136static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4137{
4138 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004139 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140 unsigned long lock_flags = 0;
4141
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004142 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004143
4144 if (!dump) {
4145 ipr_err("Dump memory allocation failed\n");
4146 return -ENOMEM;
4147 }
4148
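	/*
	 * ioa_data is an array of page pointers rather than the dump data
	 * itself.  SIS-64 adapters support a larger maximum dump, so size
	 * the pointer array for the adapter format.
	 */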
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004149 if (ioa_cfg->sis64)
4150 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4151 else
4152 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4153
4154 if (!ioa_data) {
4155 ipr_err("Dump memory allocation failed\n");
4156 kfree(dump);
4157 return -ENOMEM;
4158 }
4159
4160 dump->ioa_dump.ioa_data = ioa_data;
4161
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 kref_init(&dump->kref);
4163 dump->ioa_cfg = ioa_cfg;
4164
4165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4166
4167 if (INACTIVE != ioa_cfg->sdt_state) {
4168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004169 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 kfree(dump);
4171 return 0;
4172 }
4173
4174 ioa_cfg->dump = dump;
4175 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004176 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177 ioa_cfg->dump_taken = 1;
4178 schedule_work(&ioa_cfg->work_q);
4179 }
4180 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4181
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182 return 0;
4183}
4184
4185/**
4186 * ipr_free_dump - Free adapter dump memory
4187 * @ioa_cfg: ioa config struct
4188 *
4189 * Return value:
4190 * 0 on success / other on failure
4191 **/
4192static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4193{
4194 struct ipr_dump *dump;
4195 unsigned long lock_flags = 0;
4196
4197 ENTER;
4198
4199 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4200 dump = ioa_cfg->dump;
4201 if (!dump) {
4202 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4203 return 0;
4204 }
4205
4206 ioa_cfg->dump = NULL;
4207 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4208
4209 kref_put(&dump->kref, ipr_release_dump);
4210
4211 LEAVE;
4212 return 0;
4213}
4214
4215/**
4216 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004217 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004219 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220 * @buf: buffer
4221 * @off: offset
4222 * @count: buffer size
4223 *
4224 * Return value:
 4225 * count on success / other on failure
4226 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004227static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004228 struct bin_attribute *bin_attr,
4229 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230{
Tony Jonesee959b02008-02-22 00:13:36 +01004231 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232 struct Scsi_Host *shost = class_to_shost(cdev);
4233 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4234 int rc;
4235
4236 if (!capable(CAP_SYS_ADMIN))
4237 return -EACCES;
4238
4239 if (buf[0] == '1')
4240 rc = ipr_alloc_dump(ioa_cfg);
4241 else if (buf[0] == '0')
4242 rc = ipr_free_dump(ioa_cfg);
4243 else
4244 return -EINVAL;
4245
4246 if (rc)
4247 return rc;
4248 else
4249 return count;
4250}
4251
4252static struct bin_attribute ipr_dump_attr = {
4253 .attr = {
4254 .name = "dump",
4255 .mode = S_IRUSR | S_IWUSR,
4256 },
4257 .size = 0,
4258 .read = ipr_read_dump,
4259 .write = ipr_write_dump
4260};
4261#else
4262static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4263#endif
4264
4265/**
4266 * ipr_change_queue_depth - Change the device's queue depth
4267 * @sdev: scsi device struct
4268 * @qdepth: depth to set
Mike Christiee881a172009-10-15 17:46:39 -07004269 * @reason: calling context
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270 *
4271 * Return value:
4272 * actual depth set
4273 **/
Mike Christiee881a172009-10-15 17:46:39 -07004274static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4275 int reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276{
Brian King35a39692006-09-25 12:39:20 -05004277 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4278 struct ipr_resource_entry *res;
4279 unsigned long lock_flags = 0;
4280
Mike Christiee881a172009-10-15 17:46:39 -07004281 if (reason != SCSI_QDEPTH_DEFAULT)
4282 return -EOPNOTSUPP;
4283
Brian King35a39692006-09-25 12:39:20 -05004284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4285 res = (struct ipr_resource_entry *)sdev->hostdata;
4286
4287 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4288 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4290
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4292 return sdev->queue_depth;
4293}
4294
4295/**
4296 * ipr_change_queue_type - Change the device's queue type
 4297 * @sdev: scsi device struct
4298 * @tag_type: type of tags to use
4299 *
4300 * Return value:
4301 * actual queue type set
4302 **/
4303static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4304{
4305 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4306 struct ipr_resource_entry *res;
4307 unsigned long lock_flags = 0;
4308
4309 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4310 res = (struct ipr_resource_entry *)sdev->hostdata;
4311
4312 if (res) {
4313 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4314 /*
4315 * We don't bother quiescing the device here since the
4316 * adapter firmware does it for us.
4317 */
4318 scsi_set_tag_type(sdev, tag_type);
4319
4320 if (tag_type)
4321 scsi_activate_tcq(sdev, sdev->queue_depth);
4322 else
4323 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4324 } else
4325 tag_type = 0;
4326 } else
4327 tag_type = 0;
4328
4329 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4330 return tag_type;
4331}
4332
4333/**
4334 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4335 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004336 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337 * @buf: buffer
4338 *
4339 * Return value:
4340 * number of bytes printed to buffer
4341 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004342static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343{
4344 struct scsi_device *sdev = to_scsi_device(dev);
4345 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4346 struct ipr_resource_entry *res;
4347 unsigned long lock_flags = 0;
4348 ssize_t len = -ENXIO;
4349
4350 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4351 res = (struct ipr_resource_entry *)sdev->hostdata;
4352 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004353 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4355 return len;
4356}
4357
4358static struct device_attribute ipr_adapter_handle_attr = {
4359 .attr = {
4360 .name = "adapter_handle",
4361 .mode = S_IRUSR,
4362 },
4363 .show = ipr_show_adapter_handle
4364};
4365
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004366/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004367 * ipr_show_resource_path - Show the resource path or the resource address for
4368 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004369 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004370 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004371 * @buf: buffer
4372 *
4373 * Return value:
4374 * number of bytes printed to buffer
4375 **/
4376static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4377{
4378 struct scsi_device *sdev = to_scsi_device(dev);
4379 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4380 struct ipr_resource_entry *res;
4381 unsigned long lock_flags = 0;
4382 ssize_t len = -ENXIO;
4383 char buffer[IPR_MAX_RES_PATH_LENGTH];
4384
4385 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4386 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004387 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004388 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004389 __ipr_format_res_path(res->res_path, buffer,
4390 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004391 else if (res)
4392 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4393 res->bus, res->target, res->lun);
4394
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4396 return len;
4397}
4398
4399static struct device_attribute ipr_resource_path_attr = {
4400 .attr = {
4401 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004402 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004403 },
4404 .show = ipr_show_resource_path
4405};
4406
Wayne Boyer75576bb2010-07-14 10:50:14 -07004407/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004408 * ipr_show_device_id - Show the device_id for this device.
4409 * @dev: device struct
4410 * @attr: device attribute structure
4411 * @buf: buffer
4412 *
4413 * Return value:
4414 * number of bytes printed to buffer
4415 **/
4416static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4417{
4418 struct scsi_device *sdev = to_scsi_device(dev);
4419 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4420 struct ipr_resource_entry *res;
4421 unsigned long lock_flags = 0;
4422 ssize_t len = -ENXIO;
4423
4424 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4425 res = (struct ipr_resource_entry *)sdev->hostdata;
4426 if (res && ioa_cfg->sis64)
4427 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4428 else if (res)
4429 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4430
4431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4432 return len;
4433}
4434
4435static struct device_attribute ipr_device_id_attr = {
4436 .attr = {
4437 .name = "device_id",
4438 .mode = S_IRUGO,
4439 },
4440 .show = ipr_show_device_id
4441};
4442
4443/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004444 * ipr_show_resource_type - Show the resource type for this device.
4445 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004446 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004447 * @buf: buffer
4448 *
4449 * Return value:
4450 * number of bytes printed to buffer
4451 **/
4452static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4453{
4454 struct scsi_device *sdev = to_scsi_device(dev);
4455 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4456 struct ipr_resource_entry *res;
4457 unsigned long lock_flags = 0;
4458 ssize_t len = -ENXIO;
4459
4460 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4461 res = (struct ipr_resource_entry *)sdev->hostdata;
4462
4463 if (res)
4464 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4465
4466 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4467 return len;
4468}
4469
4470static struct device_attribute ipr_resource_type_attr = {
4471 .attr = {
4472 .name = "resource_type",
4473 .mode = S_IRUGO,
4474 },
4475 .show = ipr_show_resource_type
4476};
4477
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478static struct device_attribute *ipr_dev_attrs[] = {
4479 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004480 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004481 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004482 &ipr_resource_type_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483 NULL,
4484};
4485
4486/**
4487 * ipr_biosparam - Return the HSC mapping
4488 * @sdev: scsi device struct
4489 * @block_device: block device pointer
4490 * @capacity: capacity of the device
4491 * @parm: Array containing returned HSC values.
4492 *
4493 * This function generates the HSC parms that fdisk uses.
4494 * We want to make sure we return something that places partitions
4495 * on 4k boundaries for best performance with the IOA.
4496 *
4497 * Return value:
4498 * 0 on success
4499 **/
4500static int ipr_biosparam(struct scsi_device *sdev,
4501 struct block_device *block_device,
4502 sector_t capacity, int *parm)
4503{
4504 int heads, sectors;
4505 sector_t cylinders;
4506
4507 heads = 128;
4508 sectors = 32;
4509
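	/*
	 * 128 heads * 32 sectors = 4096 sectors per cylinder, so partitions
	 * created on cylinder boundaries start on 4k-aligned blocks.
	 */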
4510 cylinders = capacity;
4511 sector_div(cylinders, (128 * 32));
4512
4513 /* return result */
4514 parm[0] = heads;
4515 parm[1] = sectors;
4516 parm[2] = cylinders;
4517
4518 return 0;
4519}
4520
4521/**
Brian King35a39692006-09-25 12:39:20 -05004522 * ipr_find_starget - Find target based on bus/target.
4523 * @starget: scsi target struct
4524 *
4525 * Return value:
4526 * resource entry pointer if found / NULL if not found
4527 **/
4528static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4529{
4530 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4531 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4532 struct ipr_resource_entry *res;
4533
4534 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004535 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004536 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004537 return res;
4538 }
4539 }
4540
4541 return NULL;
4542}
4543
4544static struct ata_port_info sata_port_info;
4545
4546/**
4547 * ipr_target_alloc - Prepare for commands to a SCSI target
4548 * @starget: scsi target struct
4549 *
4550 * If the device is a SATA device, this function allocates an
4551 * ATA port with libata, else it does nothing.
4552 *
4553 * Return value:
4554 * 0 on success / non-0 on failure
4555 **/
4556static int ipr_target_alloc(struct scsi_target *starget)
4557{
4558 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4559 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4560 struct ipr_sata_port *sata_port;
4561 struct ata_port *ap;
4562 struct ipr_resource_entry *res;
4563 unsigned long lock_flags;
4564
4565 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4566 res = ipr_find_starget(starget);
4567 starget->hostdata = NULL;
4568
4569 if (res && ipr_is_gata(res)) {
4570 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4571 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4572 if (!sata_port)
4573 return -ENOMEM;
4574
4575 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4576 if (ap) {
4577 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4578 sata_port->ioa_cfg = ioa_cfg;
4579 sata_port->ap = ap;
4580 sata_port->res = res;
4581
4582 res->sata_port = sata_port;
4583 ap->private_data = sata_port;
4584 starget->hostdata = sata_port;
4585 } else {
4586 kfree(sata_port);
4587 return -ENOMEM;
4588 }
4589 }
4590 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4591
4592 return 0;
4593}
4594
4595/**
4596 * ipr_target_destroy - Destroy a SCSI target
4597 * @starget: scsi target struct
4598 *
4599 * If the device was a SATA device, this function frees the libata
4600 * ATA port, else it does nothing.
4601 *
4602 **/
4603static void ipr_target_destroy(struct scsi_target *starget)
4604{
4605 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004606 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4607 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4608
4609 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004610 if (!ipr_find_starget(starget)) {
4611 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4612 clear_bit(starget->id, ioa_cfg->array_ids);
4613 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4614 clear_bit(starget->id, ioa_cfg->vset_ids);
4615 else if (starget->channel == 0)
4616 clear_bit(starget->id, ioa_cfg->target_ids);
4617 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004618 }
Brian King35a39692006-09-25 12:39:20 -05004619
4620 if (sata_port) {
4621 starget->hostdata = NULL;
4622 ata_sas_port_destroy(sata_port->ap);
4623 kfree(sata_port);
4624 }
4625}
4626
4627/**
4628 * ipr_find_sdev - Find device based on bus/target/lun.
4629 * @sdev: scsi device struct
4630 *
4631 * Return value:
4632 * resource entry pointer if found / NULL if not found
4633 **/
4634static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4635{
4636 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4637 struct ipr_resource_entry *res;
4638
4639 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004640 if ((res->bus == sdev->channel) &&
4641 (res->target == sdev->id) &&
4642 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004643 return res;
4644 }
4645
4646 return NULL;
4647}
4648
4649/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650 * ipr_slave_destroy - Unconfigure a SCSI device
4651 * @sdev: scsi device struct
4652 *
4653 * Return value:
4654 * nothing
4655 **/
4656static void ipr_slave_destroy(struct scsi_device *sdev)
4657{
4658 struct ipr_resource_entry *res;
4659 struct ipr_ioa_cfg *ioa_cfg;
4660 unsigned long lock_flags = 0;
4661
4662 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4663
4664 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4665 res = (struct ipr_resource_entry *) sdev->hostdata;
4666 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004667 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004668 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669 sdev->hostdata = NULL;
4670 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004671 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672 }
4673 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4674}
4675
4676/**
4677 * ipr_slave_configure - Configure a SCSI device
4678 * @sdev: scsi device struct
4679 *
4680 * This function configures the specified scsi device.
4681 *
4682 * Return value:
4683 * 0 on success
4684 **/
4685static int ipr_slave_configure(struct scsi_device *sdev)
4686{
4687 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4688 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004689 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004691 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004692
4693 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4694 res = sdev->hostdata;
4695 if (res) {
4696 if (ipr_is_af_dasd_device(res))
4697 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004698 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004700 sdev->no_uld_attach = 1;
4701 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004702 if (ipr_is_vset_device(res)) {
Jens Axboe242f9dc2008-09-14 05:55:09 -07004703 blk_queue_rq_timeout(sdev->request_queue,
4704 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004705 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004707 if (ipr_is_gata(res) && res->sata_port)
4708 ap = res->sata_port->ap;
4709 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4710
4711 if (ap) {
Brian King35a39692006-09-25 12:39:20 -05004712 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004713 ata_sas_slave_configure(sdev, ap);
4714 } else
Brian King35a39692006-09-25 12:39:20 -05004715 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004716 if (ioa_cfg->sis64)
4717 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004718 ipr_format_res_path(ioa_cfg,
4719 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004720 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721 }
4722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4723 return 0;
4724}
4725
4726/**
Brian King35a39692006-09-25 12:39:20 -05004727 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4728 * @sdev: scsi device struct
4729 *
4730 * This function initializes an ATA port so that future commands
4731 * sent through queuecommand will work.
4732 *
4733 * Return value:
4734 * 0 on success
4735 **/
4736static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4737{
4738 struct ipr_sata_port *sata_port = NULL;
4739 int rc = -ENXIO;
4740
4741 ENTER;
4742 if (sdev->sdev_target)
4743 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07004744 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05004745 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07004746 if (rc == 0)
4747 rc = ata_sas_sync_probe(sata_port->ap);
4748 }
4749
Brian King35a39692006-09-25 12:39:20 -05004750 if (rc)
4751 ipr_slave_destroy(sdev);
4752
4753 LEAVE;
4754 return rc;
4755}
4756
4757/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004758 * ipr_slave_alloc - Prepare for commands to a device.
4759 * @sdev: scsi device struct
4760 *
4761 * This function saves a pointer to the resource entry
4762 * in the scsi device struct if the device exists. We
4763 * can then use this pointer in ipr_queuecommand when
4764 * handling new commands.
4765 *
4766 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004767 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004768 **/
4769static int ipr_slave_alloc(struct scsi_device *sdev)
4770{
4771 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4772 struct ipr_resource_entry *res;
4773 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004774 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775
4776 sdev->hostdata = NULL;
4777
4778 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4779
Brian King35a39692006-09-25 12:39:20 -05004780 res = ipr_find_sdev(sdev);
4781 if (res) {
4782 res->sdev = sdev;
4783 res->add_to_ml = 0;
4784 res->in_erp = 0;
4785 sdev->hostdata = res;
4786 if (!ipr_is_naca_model(res))
4787 res->needs_sync_complete = 1;
4788 rc = 0;
4789 if (ipr_is_gata(res)) {
4790 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4791 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792 }
4793 }
4794
4795 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4796
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004797 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798}
4799
4800/**
4801 * ipr_eh_host_reset - Reset the host adapter
4802 * @scsi_cmd: scsi command struct
4803 *
4804 * Return value:
4805 * SUCCESS / FAILED
4806 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004807static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004808{
4809 struct ipr_ioa_cfg *ioa_cfg;
4810 int rc;
4811
4812 ENTER;
4813 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4814
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02004815 if (!ioa_cfg->in_reset_reload) {
4816 dev_err(&ioa_cfg->pdev->dev,
4817 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004818
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02004819 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4820 ioa_cfg->sdt_state = GET_DUMP;
4821 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822
4823 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4824
4825 LEAVE;
4826 return rc;
4827}
4828
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004829static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Jeff Garzik df0ae242005-05-28 07:57:14 -04004830{
4831 int rc;
4832
4833 spin_lock_irq(cmd->device->host->host_lock);
4834 rc = __ipr_eh_host_reset(cmd);
4835 spin_unlock_irq(cmd->device->host->host_lock);
4836
4837 return rc;
4838}
4839
Linus Torvalds1da177e2005-04-16 15:20:36 -07004840/**
Brian Kingc6513092006-03-29 09:37:43 -06004841 * ipr_device_reset - Reset the device
4842 * @ioa_cfg: ioa config struct
4843 * @res: resource entry struct
4844 *
4845 * This function issues a device reset to the affected device.
4846 * If the device is a SCSI device, a LUN reset will be sent
4847 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05004848 * will be sent. If the device is a SATA device, a PHY reset will
4849 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06004850 *
4851 * Return value:
4852 * 0 on success / non-zero on failure
4853 **/
4854static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4855 struct ipr_resource_entry *res)
4856{
4857 struct ipr_cmnd *ipr_cmd;
4858 struct ipr_ioarcb *ioarcb;
4859 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05004860 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06004861 u32 ioasc;
4862
4863 ENTER;
4864 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4865 ioarcb = &ipr_cmd->ioarcb;
4866 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08004867
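	/*
	 * On SIS-64 adapters the ATA register block lives outside the IOARCB,
	 * so point regs at the 64-bit ioadl area and record the offset of the
	 * additional command parameters.  Older adapters carry the registers
	 * in the IOARCB's add_data area.
	 */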
4868 if (ipr_cmd->ioa_cfg->sis64) {
4869 regs = &ipr_cmd->i.ata_ioadl.regs;
4870 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4871 } else
4872 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06004873
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004874 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06004875 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4876 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05004877 if (ipr_is_gata(res)) {
4878 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08004879 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05004880 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4881 }
Brian Kingc6513092006-03-29 09:37:43 -06004882
4883 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004884 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004885 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004886 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4887 if (ipr_cmd->ioa_cfg->sis64)
4888 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4889 sizeof(struct ipr_ioasa_gata));
4890 else
4891 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4892 sizeof(struct ipr_ioasa_gata));
4893 }
Brian Kingc6513092006-03-29 09:37:43 -06004894
4895 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004896 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06004897}
4898
4899/**
Brian King35a39692006-09-25 12:39:20 -05004900 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09004901 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05004902 * @classes: class of the attached device
4903 *
Tejun Heocc0680a2007-08-06 18:36:23 +09004904 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05004905 *
4906 * Return value:
4907 * 0 on success / non-zero on failure
4908 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09004909static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07004910 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05004911{
Tejun Heocc0680a2007-08-06 18:36:23 +09004912 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05004913 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4914 struct ipr_resource_entry *res;
4915 unsigned long lock_flags = 0;
4916 int rc = -ENXIO;
4917
4918 ENTER;
4919 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004920 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06004921 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4922 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4923 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4924 }
4925
Brian King35a39692006-09-25 12:39:20 -05004926 res = sata_port->res;
4927 if (res) {
4928 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004929 *classes = res->ata_class;
Brian King35a39692006-09-25 12:39:20 -05004930 }
4931
4932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4933 LEAVE;
4934 return rc;
4935}
4936
4937/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938 * ipr_eh_dev_reset - Reset the device
4939 * @scsi_cmd: scsi command struct
4940 *
4941 * This function issues a device reset to the affected device.
4942 * A LUN reset will be sent to the device first. If that does
4943 * not work, a target reset will be sent.
4944 *
4945 * Return value:
4946 * SUCCESS / FAILED
4947 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004948static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004949{
4950 struct ipr_cmnd *ipr_cmd;
4951 struct ipr_ioa_cfg *ioa_cfg;
4952 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05004953 struct ata_port *ap;
4954 int rc = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004955 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956
4957 ENTER;
4958 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4959 res = scsi_cmd->device->hostdata;
4960
brking@us.ibm.comeeb883072005-11-01 17:02:29 -06004961 if (!res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962 return FAILED;
4963
4964 /*
4965 * If we are currently going through reset/reload, return failed. This will force the
4966 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4967 * reset to complete
4968 */
4969 if (ioa_cfg->in_reset_reload)
4970 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004971 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972 return FAILED;
4973
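	/*
	 * Redirect the done handler of every command still pending against
	 * this device so that, once the reset completes, those commands are
	 * cleaned up through the eh done paths instead of their normal
	 * completion handlers.
	 */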
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004974 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004975 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06004976 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4977 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4978 if (ipr_cmd->scsi_cmd)
4979 ipr_cmd->done = ipr_scsi_eh_done;
4980 if (ipr_cmd->qc)
4981 ipr_cmd->done = ipr_sata_eh_done;
4982 if (ipr_cmd->qc &&
4983 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4984 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4985 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4986 }
Brian King7402ece2006-11-21 10:28:23 -06004987 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004988 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004989 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06004992 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05004993
4994 if (ipr_is_gata(res) && res->sata_port) {
4995 ap = res->sata_port->ap;
4996 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09004997 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05004998 spin_lock_irq(scsi_cmd->device->host->host_lock);
Brian King5af23d22007-05-09 15:36:35 -05004999
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005000 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005001 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005002 list_for_each_entry(ipr_cmd,
5003 &hrrq->hrrq_pending_q, queue) {
5004 if (ipr_cmd->ioarcb.res_handle ==
5005 res->res_handle) {
5006 rc = -EIO;
5007 break;
5008 }
Brian King5af23d22007-05-09 15:36:35 -05005009 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005010 spin_unlock(&hrrq->_lock);
Brian King5af23d22007-05-09 15:36:35 -05005011 }
Brian King35a39692006-09-25 12:39:20 -05005012 } else
5013 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014 res->resetting_device = 0;
5015
Linus Torvalds1da177e2005-04-16 15:20:36 -07005016 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005017 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005018}
5019
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005020static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005021{
5022 int rc;
5023
5024 spin_lock_irq(cmd->device->host->host_lock);
5025 rc = __ipr_eh_dev_reset(cmd);
5026 spin_unlock_irq(cmd->device->host->host_lock);
5027
5028 return rc;
5029}
5030
Linus Torvalds1da177e2005-04-16 15:20:36 -07005031/**
5032 * ipr_bus_reset_done - Op done function for bus reset.
5033 * @ipr_cmd: ipr command struct
5034 *
5035 * This function is the op done function for a bus reset
5036 *
5037 * Return value:
5038 * none
5039 **/
5040static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5041{
5042 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5043 struct ipr_resource_entry *res;
5044
5045 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005046 if (!ioa_cfg->sis64)
5047 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5048 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5049 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5050 break;
5051 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005052 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053
5054 /*
5055 * If abort has not completed, indicate the reset has, else call the
5056 * abort's done function to wake the sleeping eh thread
5057 */
5058 if (ipr_cmd->sibling->sibling)
5059 ipr_cmd->sibling->sibling = NULL;
5060 else
5061 ipr_cmd->sibling->done(ipr_cmd->sibling);
5062
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005063 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064 LEAVE;
5065}
5066
5067/**
5068 * ipr_abort_timeout - An abort task has timed out
5069 * @ipr_cmd: ipr command struct
5070 *
5071 * This function handles when an abort task times out. If this
5072 * happens we issue a bus reset since we have resources tied
5073 * up that must be freed before returning to the midlayer.
5074 *
5075 * Return value:
5076 * none
5077 **/
5078static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5079{
5080 struct ipr_cmnd *reset_cmd;
5081 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5082 struct ipr_cmd_pkt *cmd_pkt;
5083 unsigned long lock_flags = 0;
5084
5085 ENTER;
5086 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5087 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5088 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5089 return;
5090 }
5091
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005092 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5094 ipr_cmd->sibling = reset_cmd;
5095 reset_cmd->sibling = ipr_cmd;
5096 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5097 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5098 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5099 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5100 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5101
5102 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5104 LEAVE;
5105}
5106
5107/**
5108 * ipr_cancel_op - Cancel specified op
5109 * @scsi_cmd: scsi command struct
5110 *
5111 * This function cancels specified op.
5112 *
5113 * Return value:
5114 * SUCCESS / FAILED
5115 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005116static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005117{
5118 struct ipr_cmnd *ipr_cmd;
5119 struct ipr_ioa_cfg *ioa_cfg;
5120 struct ipr_resource_entry *res;
5121 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005122 u32 ioasc, int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123 int op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005124 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005125
5126 ENTER;
5127 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5128 res = scsi_cmd->device->hostdata;
5129
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005130 /* If we are currently going through reset/reload, return failed.
5131 * This will force the mid-layer to call ipr_eh_host_reset,
5132 * which will then go to sleep and wait for the reset to complete
5133 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005134 if (ioa_cfg->in_reset_reload ||
5135 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005136 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005137 if (!res)
5138 return FAILED;
5139
5140 /*
 5141 * If we are aborting a timed-out op, chances are that the timeout was caused
 5142 * by an EEH error that has not yet been detected. In such cases, reading a
 5143 * register will trigger the EEH recovery infrastructure.
5144 */
5145 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5146
5147 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148 return FAILED;
5149
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005150 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005151 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005152 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5153 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5154 ipr_cmd->done = ipr_scsi_eh_done;
5155 op_found = 1;
5156 break;
5157 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005158 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005159 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160 }
5161
5162 if (!op_found)
5163 return SUCCESS;
5164
5165 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005166 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5168 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5169 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5170 ipr_cmd->u.sdev = scsi_cmd->device;
5171
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005172 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5173 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005175 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176
5177 /*
5178 * If the abort task timed out and we sent a bus reset, we will get
 5179 * one of the following responses to the abort
5180 */
5181 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5182 ioasc = 0;
5183 ipr_trace;
5184 }
5185
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005186	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005187 if (!ipr_is_naca_model(res))
5188 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005189
5190 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005191 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005192}
5193
5194/**
5195 * ipr_eh_abort - Abort a single op
5196 * @scsi_cmd: scsi command struct
5197 *
5198 * Return value:
5199 * SUCCESS / FAILED
5200 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005201static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005203 unsigned long flags;
5204 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205
5206 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005208 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5209 rc = ipr_cancel_op(scsi_cmd);
5210 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211
5212 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005213 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214}
5215
5216/**
5217 * ipr_handle_other_interrupt - Handle "other" interrupts
5218 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005219 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005220 *
5221 * Return value:
5222 * IRQ_NONE / IRQ_HANDLED
5223 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005224static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005225 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226{
5227 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005228 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005229
Wayne Boyer7dacb642011-04-12 10:29:02 -07005230 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5231 int_reg &= ~int_mask_reg;
5232
5233 /* If an interrupt on the adapter did not occur, ignore it.
5234 * Or in the case of SIS 64, check for a stage change interrupt.
5235 */
5236 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5237 if (ioa_cfg->sis64) {
5238 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5239 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5240 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5241
5242 /* clear stage change */
5243 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5244 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5245 list_del(&ioa_cfg->reset_cmd->queue);
5246 del_timer(&ioa_cfg->reset_cmd->timer);
5247 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5248 return IRQ_HANDLED;
5249 }
5250 }
5251
5252 return IRQ_NONE;
5253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005254
5255 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5256 /* Mask the interrupt */
5257 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5258
5259 /* Clear the interrupt */
5260 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5261 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5262
5263 list_del(&ioa_cfg->reset_cmd->queue);
5264 del_timer(&ioa_cfg->reset_cmd->timer);
5265 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005266 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005267 if (ioa_cfg->clear_isr) {
5268 if (ipr_debug && printk_ratelimit())
5269 dev_err(&ioa_cfg->pdev->dev,
5270 "Spurious interrupt detected. 0x%08X\n", int_reg);
5271 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5272 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5273 return IRQ_NONE;
5274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275 } else {
5276 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5277 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005278 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5279 dev_err(&ioa_cfg->pdev->dev,
5280 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005281 else
5282 dev_err(&ioa_cfg->pdev->dev,
5283 "Permanent IOA failure. 0x%08X\n", int_reg);
5284
5285 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5286 ioa_cfg->sdt_state = GET_DUMP;
5287
5288 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5289 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5290 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005291
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292 return rc;
5293}
5294
5295/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005296 * ipr_isr_eh - Interrupt service routine error handler
5297 * @ioa_cfg: ioa config struct
 5298 * @msg: message to log
 * @number: number to log along with the message
5299 *
5300 * Return value:
5301 * none
5302 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005303static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005304{
5305 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005306 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005307
5308 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5309 ioa_cfg->sdt_state = GET_DUMP;
5310
5311 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5312}
5313
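/**
 * ipr_process_hrrq - Process responses posted to a host RRQ
 * @hrr_queue:	host RRQ to drain
 * @budget:	maximum number of responses to process, or -1 for no limit
 * @doneq:	list to which completed commands are moved
 *
 * Return value:
 * 	number of response queue entries processed
 **/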
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005314static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005315 struct list_head *doneq)
5316{
5317 u32 ioasc;
5318 u16 cmd_index;
5319 struct ipr_cmnd *ipr_cmd;
5320 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5321 int num_hrrq = 0;
5322
5323 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005324 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005325 return 0;
5326
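	/*
	 * toggle_bit is flipped each time the host wraps hrrq_curr, so a
	 * response whose toggle bit matches the host's copy is one the
	 * adapter has newly posted and we have not yet processed.
	 */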
5327 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5328 hrr_queue->toggle_bit) {
5329
5330 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5331 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5332 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5333
5334 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5335 cmd_index < hrr_queue->min_cmd_id)) {
5336 ipr_isr_eh(ioa_cfg,
5337 "Invalid response handle from IOA: ",
5338 cmd_index);
5339 break;
5340 }
5341
5342 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5343 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5344
5345 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5346
5347 list_move_tail(&ipr_cmd->queue, doneq);
5348
5349 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5350 hrr_queue->hrrq_curr++;
5351 } else {
5352 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5353 hrr_queue->toggle_bit ^= 1u;
5354 }
5355 num_hrrq++;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005356 if (budget > 0 && num_hrrq >= budget)
5357 break;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005358 }
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005359
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005360 return num_hrrq;
5361}
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005362
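/**
 * ipr_iopoll - blk_iopoll callback used to poll an HRRQ for completions
 * @iop:	iopoll structure embedded in the HRRQ
 * @budget:	maximum number of completions to process in one pass
 *
 * Return value:
 * 	number of operations completed
 **/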
5363static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5364{
5365 struct ipr_ioa_cfg *ioa_cfg;
5366 struct ipr_hrr_queue *hrrq;
5367 struct ipr_cmnd *ipr_cmd, *temp;
5368 unsigned long hrrq_flags;
5369 int completed_ops;
5370 LIST_HEAD(doneq);
5371
5372 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5373 ioa_cfg = hrrq->ioa_cfg;
5374
5375 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5376 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5377
5378 if (completed_ops < budget)
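	/*
	 * Completing fewer entries than the budget means the queue has been
	 * drained, so finish this iopoll cycle and let the next interrupt
	 * schedule polling again.
	 */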
5379 blk_iopoll_complete(iop);
5380 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5381
5382 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5383 list_del(&ipr_cmd->queue);
5384 del_timer(&ipr_cmd->timer);
5385 ipr_cmd->fast_done(ipr_cmd);
5386 }
5387
5388 return completed_ops;
5389}
5390
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005391/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392 * ipr_isr - Interrupt service routine
5393 * @irq: irq number
5394 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005395 *
5396 * Return value:
5397 * IRQ_NONE / IRQ_HANDLED
5398 **/
David Howells7d12e782006-10-05 14:55:46 +01005399static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005400{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005401 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5402 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005403 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005404 u32 int_reg = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005405 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005406 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005407 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005409 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005410
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005411 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005412 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005413 if (!hrrq->allow_interrupts) {
5414 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415 return IRQ_NONE;
5416 }
5417
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418 while (1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005419 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5420 rc = IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005421
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005422 if (!ioa_cfg->clear_isr)
5423 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005426 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005427 do {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005428 writel(IPR_PCII_HRRQ_UPDATED,
5429 ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005430 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005431 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005432 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005433
Wayne Boyer7dacb642011-04-12 10:29:02 -07005434 } else if (rc == IRQ_NONE && irq_none == 0) {
5435 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5436 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005437 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5438 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005439 ipr_isr_eh(ioa_cfg,
5440 "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005441 rc = IRQ_HANDLED;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005442 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443 } else
5444 break;
5445 }
5446
5447 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005448 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005449
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005450 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005451 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5452 list_del(&ipr_cmd->queue);
5453 del_timer(&ipr_cmd->timer);
5454 ipr_cmd->fast_done(ipr_cmd);
5455 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005456 return rc;
5457}
Brian King172cd6e2012-07-17 08:14:40 -05005458
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005459/**
5460 * ipr_isr_mhrrq - Interrupt service routine
5461 * @irq: irq number
5462 * @devp: pointer to ioa config struct
5463 *
5464 * Return value:
5465 * IRQ_NONE / IRQ_HANDLED
5466 **/
5467static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5468{
5469 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005470 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005471 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005472 struct ipr_cmnd *ipr_cmd, *temp;
5473 irqreturn_t rc = IRQ_NONE;
5474 LIST_HEAD(doneq);
5475
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005476 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005477
5478 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005479 if (!hrrq->allow_interrupts) {
5480 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005481 return IRQ_NONE;
5482 }
5483
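	/*
	 * With iopoll enabled on a multi-vector SIS-64 adapter, only schedule
	 * polling for this HRRQ here; the completions themselves are
	 * processed later in ipr_iopoll().
	 */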
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005484 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5485 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5486 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5487 hrrq->toggle_bit) {
5488 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5489 blk_iopoll_sched(&hrrq->iopoll);
5490 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5491 return IRQ_HANDLED;
5492 }
5493 } else {
5494 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5495 hrrq->toggle_bit)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005496
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005497 if (ipr_process_hrrq(hrrq, -1, &doneq))
5498 rc = IRQ_HANDLED;
5499 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005500
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005501 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005502
5503 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5504 list_del(&ipr_cmd->queue);
5505 del_timer(&ipr_cmd->timer);
5506 ipr_cmd->fast_done(ipr_cmd);
5507 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005508 return rc;
5509}
5510
5511/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005512 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005513 * @ioa_cfg: ioa config struct
5514 * @ipr_cmd: ipr command struct
5515 *
5516 * Return value:
5517 * 0 on success / -1 on failure
5518 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005519static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5520 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005521{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005522 int i, nseg;
5523 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005524 u32 length;
5525 u32 ioadl_flags = 0;
5526 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5527 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005528 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005529
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005530 length = scsi_bufflen(scsi_cmd);
5531 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005532 return 0;
5533
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005534 nseg = scsi_dma_map(scsi_cmd);
5535 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005536 if (printk_ratelimit())
5537 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005538 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005539 }
5540
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005541 ipr_cmd->dma_use_sg = nseg;
5542
Wayne Boyer438b0332010-05-10 09:13:00 -07005543 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005544 ioarcb->ioadl_len =
5545 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005546
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005547 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5548 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5549 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005550 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5551 ioadl_flags = IPR_IOADL_FLAGS_READ;
5552
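	/* Build one 64-bit IOADL descriptor per S/G element; the final descriptor is flagged below to terminate the chain. */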
5553 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5554 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5555 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5556 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5557 }
5558
5559 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5560 return 0;
5561}
5562
5563/**
5564 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5565 * @ioa_cfg: ioa config struct
5566 * @ipr_cmd: ipr command struct
5567 *
5568 * Return value:
5569 * 0 on success / -1 on failure
5570 **/
5571static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5572 struct ipr_cmnd *ipr_cmd)
5573{
5574 int i, nseg;
5575 struct scatterlist *sg;
5576 u32 length;
5577 u32 ioadl_flags = 0;
5578 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5579 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5580 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5581
5582 length = scsi_bufflen(scsi_cmd);
5583 if (!length)
5584 return 0;
5585
5586 nseg = scsi_dma_map(scsi_cmd);
5587 if (nseg < 0) {
5588 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5589 return -1;
5590 }
5591
5592 ipr_cmd->dma_use_sg = nseg;
5593
5594 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5595 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5596 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5597 ioarcb->data_transfer_length = cpu_to_be32(length);
5598 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005599 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5600 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5601 ioadl_flags = IPR_IOADL_FLAGS_READ;
5602 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5603 ioarcb->read_ioadl_len =
5604 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5605 }
5606
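	/* A short S/G list fits in the IOADL area embedded in the IOARCB, so the descriptors travel with the request itself. */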
Wayne Boyera32c0552010-02-19 13:23:36 -08005607 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5608 ioadl = ioarcb->u.add_data.u.ioadl;
5609 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5610 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005611 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5612 }
5613
5614 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5615 ioadl[i].flags_and_data_len =
5616 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5617 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5618 }
5619
5620 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5621 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005622}
5623
5624/**
5625 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5626 * @scsi_cmd: scsi command struct
5627 *
5628 * Return value:
5629 * task attributes
5630 **/
5631static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5632{
5633 u8 tag[2];
5634 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5635
5636 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5637 switch (tag[0]) {
5638 case MSG_SIMPLE_TAG:
5639 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5640 break;
5641 case MSG_HEAD_TAG:
5642 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5643 break;
5644 case MSG_ORDERED_TAG:
5645 rc = IPR_FLAGS_LO_ORDERED_TASK;
5646 break;
 5647 		}
5648 }
5649
5650 return rc;
5651}
5652
5653/**
5654 * ipr_erp_done - Process completion of ERP for a device
5655 * @ipr_cmd: ipr command struct
5656 *
5657 * This function copies the sense buffer into the scsi_cmd
5658 * struct and pushes the scsi_done function.
5659 *
5660 * Return value:
5661 * nothing
5662 **/
5663static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5664{
5665 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5666 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005667 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668
5669 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5670 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005671 scmd_printk(KERN_ERR, scsi_cmd,
5672 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673 } else {
5674 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5675 SCSI_SENSE_BUFFERSIZE);
5676 }
5677
5678 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005679 if (!ipr_is_naca_model(res))
5680 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005681 res->in_erp = 0;
5682 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005683 scsi_dma_unmap(ipr_cmd->scsi_cmd);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005684 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685 scsi_cmd->scsi_done(scsi_cmd);
5686}
5687
5688/**
5689 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5690 * @ipr_cmd: ipr command struct
5691 *
5692 * Return value:
5693 * none
5694 **/
5695static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5696{
Brian King51b1c7e2007-03-29 12:43:50 -05005697 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005698 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08005699 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005700
5701 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08005702 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005703 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005704 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005705 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005706 ioasa->hdr.ioasc = 0;
5707 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005708
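	/* Re-point the IOADL address at this command block's own descriptor area, using the 64-bit or 32-bit layout as appropriate. */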
5709 if (ipr_cmd->ioa_cfg->sis64)
5710 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5711 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5712 else {
5713 ioarcb->write_ioadl_addr =
5714 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5715 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5716 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005717}
5718
5719/**
5720 * ipr_erp_request_sense - Send request sense to a device
5721 * @ipr_cmd: ipr command struct
5722 *
5723 * This function sends a request sense to a device as a result
5724 * of a check condition.
5725 *
5726 * Return value:
5727 * nothing
5728 **/
5729static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5730{
5731 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005732 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005733
5734 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5735 ipr_erp_done(ipr_cmd);
5736 return;
5737 }
5738
5739 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5740
5741 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5742 cmd_pkt->cdb[0] = REQUEST_SENSE;
5743 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5744 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5745 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5746 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5747
Wayne Boyera32c0552010-02-19 13:23:36 -08005748 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5749 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005750
5751 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5752 IPR_REQUEST_SENSE_TIMEOUT * 2);
5753}
5754
5755/**
5756 * ipr_erp_cancel_all - Send cancel all to a device
5757 * @ipr_cmd: ipr command struct
5758 *
5759 * This function sends a cancel all to a device to clear the
5760 * queue. If we are running TCQ on the device, QERR is set to 1,
5761 * which means all outstanding ops have been dropped on the floor.
5762 * Cancel all will return them to us.
5763 *
5764 * Return value:
5765 * nothing
5766 **/
5767static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5768{
5769 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5770 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5771 struct ipr_cmd_pkt *cmd_pkt;
5772
5773 res->in_erp = 1;
5774
5775 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5776
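	/* Untagged devices have no queued commands to cancel, so go straight to the request sense. */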
5777 if (!scsi_get_tag_type(scsi_cmd->device)) {
5778 ipr_erp_request_sense(ipr_cmd);
5779 return;
5780 }
5781
5782 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5783 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5784 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5785
5786 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5787 IPR_CANCEL_ALL_TIMEOUT);
5788}
5789
5790/**
5791 * ipr_dump_ioasa - Dump contents of IOASA
5792 * @ioa_cfg: ioa config struct
5793 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06005794 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005795 *
5796 * This function is invoked by the interrupt handler when ops
5797 * fail. It will log the IOASA if appropriate. Only called
5798 * for GPDD ops.
5799 *
5800 * Return value:
5801 * none
5802 **/
5803static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06005804 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005805{
5806 int i;
5807 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05005808 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005809 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005810 __be32 *ioasa_data = (__be32 *)ioasa;
5811 int error_index;
5812
Wayne Boyer96d21f02010-05-10 09:13:27 -07005813 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5814 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005815
5816 if (0 == ioasc)
5817 return;
5818
5819 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5820 return;
5821
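	/* When the bus was reset, the failing-device IOASC identifies the underlying error, so prefer it for the error table lookup. */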
Brian Kingb0692dd2007-03-29 12:43:09 -05005822 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5823 error_index = ipr_get_error(fd_ioasc);
5824 else
5825 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005826
5827 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5828 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07005829 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005830 return;
5831
Brian Kingcc9bd5d2007-03-29 12:43:01 -05005832 if (!ipr_is_gscsi(res))
5833 return;
5834
Linus Torvalds1da177e2005-04-16 15:20:36 -07005835 if (ipr_error_table[error_index].log_ioasa == 0)
5836 return;
5837 }
5838
Brian Kingfe964d02006-03-29 09:37:29 -06005839 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005840
Wayne Boyer96d21f02010-05-10 09:13:27 -07005841 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5842 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5843 data_len = sizeof(struct ipr_ioasa64);
5844 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005845 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005846
5847 ipr_err("IOASA Dump:\n");
5848
5849 for (i = 0; i < data_len / 4; i += 4) {
5850 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5851 be32_to_cpu(ioasa_data[i]),
5852 be32_to_cpu(ioasa_data[i+1]),
5853 be32_to_cpu(ioasa_data[i+2]),
5854 be32_to_cpu(ioasa_data[i+3]));
5855 }
5856}
5857
5858/**
5859 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 5860 * @ipr_cmd: ipr command struct
5862 *
5863 * Return value:
5864 * none
5865 **/
5866static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5867{
5868 u32 failing_lba;
5869 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5870 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005871 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5872 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005873
5874 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5875
5876 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5877 return;
5878
5879 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5880
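	/* A VSET media error with a 64-bit failing LBA needs descriptor-format sense (0x72); everything else uses fixed format (0x70). */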
5881 if (ipr_is_vset_device(res) &&
5882 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5883 ioasa->u.vset.failing_lba_hi != 0) {
5884 sense_buf[0] = 0x72;
5885 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5886 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5887 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5888
5889 sense_buf[7] = 12;
5890 sense_buf[8] = 0;
5891 sense_buf[9] = 0x0A;
5892 sense_buf[10] = 0x80;
5893
5894 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5895
5896 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5897 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5898 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5899 sense_buf[15] = failing_lba & 0x000000ff;
5900
5901 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5902
5903 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5904 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5905 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5906 sense_buf[19] = failing_lba & 0x000000ff;
5907 } else {
5908 sense_buf[0] = 0x70;
5909 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5910 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5911 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5912
5913 /* Illegal request */
5914 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07005915 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005916 sense_buf[7] = 10; /* additional length */
5917
5918 /* IOARCB was in error */
5919 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5920 sense_buf[15] = 0xC0;
5921 else /* Parameter data was invalid */
5922 sense_buf[15] = 0x80;
5923
5924 sense_buf[16] =
5925 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005926 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005927 sense_buf[17] =
5928 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005929 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005930 } else {
5931 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5932 if (ipr_is_vset_device(res))
5933 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5934 else
5935 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5936
5937 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5938 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5939 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5940 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5941 sense_buf[6] = failing_lba & 0x000000ff;
5942 }
5943
5944 sense_buf[7] = 6; /* additional length */
5945 }
5946 }
5947}
5948
5949/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005950 * ipr_get_autosense - Copy autosense data to sense buffer
5951 * @ipr_cmd: ipr command struct
5952 *
5953 * This function copies the autosense buffer to the buffer
5954 * in the scsi_cmd, if there is autosense available.
5955 *
5956 * Return value:
5957 * 1 if autosense was available / 0 if not
5958 **/
5959static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5960{
Wayne Boyer96d21f02010-05-10 09:13:27 -07005961 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5962 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005963
Wayne Boyer96d21f02010-05-10 09:13:27 -07005964 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005965 return 0;
5966
Wayne Boyer96d21f02010-05-10 09:13:27 -07005967 if (ipr_cmd->ioa_cfg->sis64)
5968 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5969 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5970 SCSI_SENSE_BUFFERSIZE));
5971 else
5972 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5973 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5974 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005975 return 1;
5976}
5977
5978/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005979 * ipr_erp_start - Process an error response for a SCSI op
5980 * @ioa_cfg: ioa config struct
5981 * @ipr_cmd: ipr command struct
5982 *
5983 * This function determines whether or not to initiate ERP
5984 * on the affected device.
5985 *
5986 * Return value:
5987 * nothing
5988 **/
5989static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5990 struct ipr_cmnd *ipr_cmd)
5991{
5992 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5993 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005994 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05005995 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005996
5997 if (!res) {
5998 ipr_scsi_eh_done(ipr_cmd);
5999 return;
6000 }
6001
Brian King8a048992007-04-26 16:00:10 -05006002 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006003 ipr_gen_sense(ipr_cmd);
6004
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006005 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6006
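	/* Map the masked IOASC to a mid-layer result and decide whether further ERP (cancel all / request sense) is required. */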
Brian King8a048992007-04-26 16:00:10 -05006007 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006008 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006009 if (ipr_is_naca_model(res))
6010 scsi_cmd->result |= (DID_ABORT << 16);
6011 else
6012 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006013 break;
6014 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006015 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006016 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6017 break;
6018 case IPR_IOASC_HW_SEL_TIMEOUT:
6019 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006020 if (!ipr_is_naca_model(res))
6021 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006022 break;
6023 case IPR_IOASC_SYNC_REQUIRED:
6024 if (!res->in_erp)
6025 res->needs_sync_complete = 1;
6026 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6027 break;
6028 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006029 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006030 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6031 break;
6032 case IPR_IOASC_BUS_WAS_RESET:
6033 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6034 /*
6035 * Report the bus reset and ask for a retry. The device
6036 * will give CC/UA the next command.
6037 */
6038 if (!res->resetting_device)
6039 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6040 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006041 if (!ipr_is_naca_model(res))
6042 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006043 break;
6044 case IPR_IOASC_HW_DEV_BUS_STATUS:
6045 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6046 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006047 if (!ipr_get_autosense(ipr_cmd)) {
6048 if (!ipr_is_naca_model(res)) {
6049 ipr_erp_cancel_all(ipr_cmd);
6050 return;
6051 }
6052 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006053 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006054 if (!ipr_is_naca_model(res))
6055 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006056 break;
6057 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6058 break;
6059 default:
Brian King5b7304f2006-08-02 14:57:51 -05006060 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6061 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006062 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006063 res->needs_sync_complete = 1;
6064 break;
6065 }
6066
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006067 scsi_dma_unmap(ipr_cmd->scsi_cmd);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006068 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006069 scsi_cmd->scsi_done(scsi_cmd);
6070}
6071
6072/**
6073 * ipr_scsi_done - mid-layer done function
6074 * @ipr_cmd: ipr command struct
6075 *
6076 * This function is invoked by the interrupt handler for
6077 * ops generated by the SCSI mid-layer
6078 *
6079 * Return value:
6080 * none
6081 **/
6082static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6083{
6084 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6085 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006086 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006087 unsigned long hrrq_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006088
Wayne Boyer96d21f02010-05-10 09:13:27 -07006089 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006090
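	/* Fast path: a zero sense key means the command completed cleanly; anything else is handed to ERP under the HRRQ lock. */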
6091 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05006092 scsi_dma_unmap(scsi_cmd);
6093
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006094 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006095 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006096 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006097 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006098 } else {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006099 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006100 ipr_erp_start(ioa_cfg, ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006101 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006102 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006103}
6104
6105/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006106 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006107 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006108 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006109 *
6110 * This function queues a request generated by the mid-layer.
6111 *
6112 * Return value:
6113 * 0 on success
6114 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6115 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6116 **/
Brian King00bfef22012-07-17 08:13:52 -05006117static int ipr_queuecommand(struct Scsi_Host *shost,
6118 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006119{
6120 struct ipr_ioa_cfg *ioa_cfg;
6121 struct ipr_resource_entry *res;
6122 struct ipr_ioarcb *ioarcb;
6123 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006124 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006125 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006126 struct ipr_hrr_queue *hrrq;
6127 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006128
Brian King00bfef22012-07-17 08:13:52 -05006129 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6130
Linus Torvalds1da177e2005-04-16 15:20:36 -07006131 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006132 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006133
6134 if (ipr_is_gata(res) && res->sata_port) {
6135 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6136 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6137 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6138 return rc;
6139 }
6140
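	/* Select the host RRQ this command will use; all further checks and the command allocation happen under that queue's lock. */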
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006141 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6142 hrrq = &ioa_cfg->hrrq[hrrq_id];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006143
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006144 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006145 /*
6146 * We are currently blocking all devices due to a host reset
6147 * We have told the host to stop giving us new requests, but
6148 * ERP ops don't count. FIXME
6149 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006150 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
6151 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006152 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006153 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006154
6155 /*
6156 * FIXME - Create scsi_set_host_offline interface
6157 * and the ioa_is_dead check can be removed
6158 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006159 if (unlikely(hrrq->ioa_is_dead || !res)) {
6160 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006161 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006162 }
6163
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006164 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6165 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006166 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006167 return SCSI_MLQUEUE_HOST_BUSY;
6168 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006169 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006170
Brian King172cd6e2012-07-17 08:14:40 -05006171 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006172 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006173
6174 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6175 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006176 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006177
6178 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6179 if (scsi_cmd->underflow == 0)
6180 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6181
Linus Torvalds1da177e2005-04-16 15:20:36 -07006182 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006183 if (ipr_is_gscsi(res))
6184 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006185 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6186 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6187 }
6188
6189 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006190 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006191 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006192 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006193
Dan Carpenterd12f1572012-07-30 11:18:22 +03006194 if (ioa_cfg->sis64)
6195 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6196 else
6197 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006199 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6200 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006201 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006202 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006203 if (!rc)
6204 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006205 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006206 }
6207
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006208 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006209 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006210 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006211 scsi_dma_unmap(scsi_cmd);
6212 goto err_nodev;
6213 }
6214
6215 ioarcb->res_handle = res->res_handle;
6216 if (res->needs_sync_complete) {
6217 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6218 res->needs_sync_complete = 0;
6219 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006220 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006221 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006222 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006223 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006224 return 0;
6225
6226err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006227 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006228 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6229 scsi_cmd->result = (DID_NO_CONNECT << 16);
6230 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006231 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006232 return 0;
6233}
6234
6235/**
Brian King35a39692006-09-25 12:39:20 -05006236 * ipr_ioctl - IOCTL handler
6237 * @sdev: scsi device struct
6238 * @cmd: IOCTL cmd
6239 * @arg: IOCTL arg
6240 *
6241 * Return value:
6242 * 0 on success / other on failure
6243 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06006244static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006245{
6246 struct ipr_resource_entry *res;
6247
6248 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006249 if (res && ipr_is_gata(res)) {
6250 if (cmd == HDIO_GET_IDENTITY)
6251 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006252 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006253 }
Brian King35a39692006-09-25 12:39:20 -05006254
6255 return -EINVAL;
6256}
6257
6258/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006259 * ipr_ioa_info - Get information about the card/driver
 6260 * @host: scsi host struct
6261 *
6262 * Return value:
6263 * pointer to buffer with description string
6264 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006265static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006266{
6267 static char buffer[512];
6268 struct ipr_ioa_cfg *ioa_cfg;
6269 unsigned long lock_flags = 0;
6270
6271 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6272
6273 spin_lock_irqsave(host->host_lock, lock_flags);
6274 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6275 spin_unlock_irqrestore(host->host_lock, lock_flags);
6276
6277 return buffer;
6278}
6279
6280static struct scsi_host_template driver_template = {
6281 .module = THIS_MODULE,
6282 .name = "IPR",
6283 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006284 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006285 .queuecommand = ipr_queuecommand,
6286 .eh_abort_handler = ipr_eh_abort,
6287 .eh_device_reset_handler = ipr_eh_dev_reset,
6288 .eh_host_reset_handler = ipr_eh_host_reset,
6289 .slave_alloc = ipr_slave_alloc,
6290 .slave_configure = ipr_slave_configure,
6291 .slave_destroy = ipr_slave_destroy,
Brian King35a39692006-09-25 12:39:20 -05006292 .target_alloc = ipr_target_alloc,
6293 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006294 .change_queue_depth = ipr_change_queue_depth,
6295 .change_queue_type = ipr_change_queue_type,
6296 .bios_param = ipr_biosparam,
6297 .can_queue = IPR_MAX_COMMANDS,
6298 .this_id = -1,
6299 .sg_tablesize = IPR_MAX_SGLIST,
6300 .max_sectors = IPR_IOA_MAX_SECTORS,
6301 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6302 .use_clustering = ENABLE_CLUSTERING,
6303 .shost_attrs = ipr_ioa_attrs,
6304 .sdev_attrs = ipr_dev_attrs,
6305 .proc_name = IPR_NAME
6306};
6307
Brian King35a39692006-09-25 12:39:20 -05006308/**
6309 * ipr_ata_phy_reset - libata phy_reset handler
6310 * @ap: ata port to reset
6311 *
6312 **/
6313static void ipr_ata_phy_reset(struct ata_port *ap)
6314{
6315 unsigned long flags;
6316 struct ipr_sata_port *sata_port = ap->private_data;
6317 struct ipr_resource_entry *res = sata_port->res;
6318 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6319 int rc;
6320
6321 ENTER;
6322 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006323 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6325 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6326 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6327 }
6328
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006329 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006330 goto out_unlock;
6331
6332 rc = ipr_device_reset(ioa_cfg, res);
6333
6334 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006335 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006336 goto out_unlock;
6337 }
6338
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006339 ap->link.device[0].class = res->ata_class;
6340 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006341 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006342
6343out_unlock:
6344 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6345 LEAVE;
6346}
6347
6348/**
6349 * ipr_ata_post_internal - Cleanup after an internal command
6350 * @qc: ATA queued command
6351 *
6352 * Return value:
6353 * none
6354 **/
6355static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6356{
6357 struct ipr_sata_port *sata_port = qc->ap->private_data;
6358 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6359 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006360 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006361 unsigned long flags;
6362
6363 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006364 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006365 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6366 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6367 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6368 }
6369
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006370 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006371 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006372 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6373 if (ipr_cmd->qc == qc) {
6374 ipr_device_reset(ioa_cfg, sata_port->res);
6375 break;
6376 }
Brian King35a39692006-09-25 12:39:20 -05006377 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006378 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006379 }
6380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6381}
6382
6383/**
Brian King35a39692006-09-25 12:39:20 -05006384 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6385 * @regs: destination
6386 * @tf: source ATA taskfile
6387 *
6388 * Return value:
6389 * none
6390 **/
6391static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6392 struct ata_taskfile *tf)
6393{
6394 regs->feature = tf->feature;
6395 regs->nsect = tf->nsect;
6396 regs->lbal = tf->lbal;
6397 regs->lbam = tf->lbam;
6398 regs->lbah = tf->lbah;
6399 regs->device = tf->device;
6400 regs->command = tf->command;
6401 regs->hob_feature = tf->hob_feature;
6402 regs->hob_nsect = tf->hob_nsect;
6403 regs->hob_lbal = tf->hob_lbal;
6404 regs->hob_lbam = tf->hob_lbam;
6405 regs->hob_lbah = tf->hob_lbah;
6406 regs->ctl = tf->ctl;
6407}
6408
6409/**
6410 * ipr_sata_done - done function for SATA commands
6411 * @ipr_cmd: ipr command struct
6412 *
6413 * This function is invoked by the interrupt handler for
6414 * ops generated by the SCSI mid-layer to SATA devices
6415 *
6416 * Return value:
6417 * none
6418 **/
6419static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6420{
6421 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6422 struct ata_queued_cmd *qc = ipr_cmd->qc;
6423 struct ipr_sata_port *sata_port = qc->ap->private_data;
6424 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006425 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006426
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006427 spin_lock(&ipr_cmd->hrrq->_lock);
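	/* Snapshot the ATA register image from the IOASA so ipr_qc_fill_rtf() can rebuild the result taskfile later. */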
Wayne Boyer96d21f02010-05-10 09:13:27 -07006428 if (ipr_cmd->ioa_cfg->sis64)
6429 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6430 sizeof(struct ipr_ioasa_gata));
6431 else
6432 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6433 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006434 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6435
Wayne Boyer96d21f02010-05-10 09:13:27 -07006436 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006437 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006438
6439 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006440 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006441 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006442 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006443 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006444 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006445 ata_qc_complete(qc);
6446}
6447
6448/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006449 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6450 * @ipr_cmd: ipr command struct
6451 * @qc: ATA queued command
6452 *
6453 **/
6454static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6455 struct ata_queued_cmd *qc)
6456{
6457 u32 ioadl_flags = 0;
6458 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6459 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6460 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6461 int len = qc->nbytes;
6462 struct scatterlist *sg;
6463 unsigned int si;
6464 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6465
6466 if (len == 0)
6467 return;
6468
6469 if (qc->dma_dir == DMA_TO_DEVICE) {
6470 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6471 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6472 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6473 ioadl_flags = IPR_IOADL_FLAGS_READ;
6474
6475 ioarcb->data_transfer_length = cpu_to_be32(len);
6476 ioarcb->ioadl_len =
6477 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6478 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6479 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6480
6481 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6482 ioadl64->flags = cpu_to_be32(ioadl_flags);
6483 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6484 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6485
6486 last_ioadl64 = ioadl64;
6487 ioadl64++;
6488 }
6489
6490 if (likely(last_ioadl64))
6491 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6492}
6493
6494/**
Brian King35a39692006-09-25 12:39:20 -05006495 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6496 * @ipr_cmd: ipr command struct
6497 * @qc: ATA queued command
6498 *
6499 **/
6500static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6501 struct ata_queued_cmd *qc)
6502{
6503 u32 ioadl_flags = 0;
6504 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006505 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006506 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006507 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006508 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006509 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006510
6511 if (len == 0)
6512 return;
6513
6514 if (qc->dma_dir == DMA_TO_DEVICE) {
6515 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6516 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006517 ioarcb->data_transfer_length = cpu_to_be32(len);
6518 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006519 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6520 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6521 ioadl_flags = IPR_IOADL_FLAGS_READ;
6522 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6523 ioarcb->read_ioadl_len =
6524 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6525 }
6526
Tejun Heoff2aeb12007-12-05 16:43:11 +09006527 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006528 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6529 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006530
6531 last_ioadl = ioadl;
6532 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006533 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006534
6535 if (likely(last_ioadl))
6536 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006537}
6538
6539/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006540 * ipr_qc_defer - Get a free ipr_cmd
6541 * @qc: queued command
6542 *
6543 * Return value:
 6544 * 0 on success / ATA_DEFER_LINK if the command cannot be queued yet
6545 **/
6546static int ipr_qc_defer(struct ata_queued_cmd *qc)
6547{
6548 struct ata_port *ap = qc->ap;
6549 struct ipr_sata_port *sata_port = ap->private_data;
6550 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6551 struct ipr_cmnd *ipr_cmd;
6552 struct ipr_hrr_queue *hrrq;
6553 int hrrq_id;
6554
6555 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6556 hrrq = &ioa_cfg->hrrq[hrrq_id];
6557
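	/* Reserve a command block up front and stash it in qc->lldd_task so ipr_qc_issue() does not have to allocate one later. */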
6558 qc->lldd_task = NULL;
6559 spin_lock(&hrrq->_lock);
6560 if (unlikely(hrrq->ioa_is_dead)) {
6561 spin_unlock(&hrrq->_lock);
6562 return 0;
6563 }
6564
6565 if (unlikely(!hrrq->allow_cmds)) {
6566 spin_unlock(&hrrq->_lock);
6567 return ATA_DEFER_LINK;
6568 }
6569
6570 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6571 if (ipr_cmd == NULL) {
6572 spin_unlock(&hrrq->_lock);
6573 return ATA_DEFER_LINK;
6574 }
6575
6576 qc->lldd_task = ipr_cmd;
6577 spin_unlock(&hrrq->_lock);
6578 return 0;
6579}
6580
6581/**
Brian King35a39692006-09-25 12:39:20 -05006582 * ipr_qc_issue - Issue a SATA qc to a device
6583 * @qc: queued command
6584 *
6585 * Return value:
 6586 * 0 on success / AC_ERR_* on failure
6587 **/
6588static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6589{
6590 struct ata_port *ap = qc->ap;
6591 struct ipr_sata_port *sata_port = ap->private_data;
6592 struct ipr_resource_entry *res = sata_port->res;
6593 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6594 struct ipr_cmnd *ipr_cmd;
6595 struct ipr_ioarcb *ioarcb;
6596 struct ipr_ioarcb_ata_regs *regs;
6597
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006598 if (qc->lldd_task == NULL)
6599 ipr_qc_defer(qc);
6600
6601 ipr_cmd = qc->lldd_task;
6602 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05006603 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05006604
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006605 qc->lldd_task = NULL;
6606 spin_lock(&ipr_cmd->hrrq->_lock);
6607 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6608 ipr_cmd->hrrq->ioa_is_dead)) {
6609 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6610 spin_unlock(&ipr_cmd->hrrq->_lock);
6611 return AC_ERR_SYSTEM;
6612 }
6613
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006614 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05006615 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05006616
Wayne Boyera32c0552010-02-19 13:23:36 -08006617 if (ioa_cfg->sis64) {
6618 regs = &ipr_cmd->i.ata_ioadl.regs;
6619 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6620 } else
6621 regs = &ioarcb->u.add_data.u.regs;
6622
6623 memset(regs, 0, sizeof(*regs));
6624 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05006625
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006626 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05006627 ipr_cmd->qc = qc;
6628 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006629 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05006630 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6631 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6632 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01006633 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05006634
Wayne Boyera32c0552010-02-19 13:23:36 -08006635 if (ioa_cfg->sis64)
6636 ipr_build_ata_ioadl64(ipr_cmd, qc);
6637 else
6638 ipr_build_ata_ioadl(ipr_cmd, qc);
6639
Brian King35a39692006-09-25 12:39:20 -05006640 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6641 ipr_copy_sata_tf(regs, &qc->tf);
6642 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006643 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05006644
6645 switch (qc->tf.protocol) {
6646 case ATA_PROT_NODATA:
6647 case ATA_PROT_PIO:
6648 break;
6649
6650 case ATA_PROT_DMA:
6651 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6652 break;
6653
Tejun Heo0dc36882007-12-18 16:34:43 -05006654 case ATAPI_PROT_PIO:
6655 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05006656 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6657 break;
6658
Tejun Heo0dc36882007-12-18 16:34:43 -05006659 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05006660 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6661 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6662 break;
6663
6664 default:
6665 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006666 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05006667 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05006668 }
6669
Wayne Boyera32c0552010-02-19 13:23:36 -08006670 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006671 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08006672
Brian King35a39692006-09-25 12:39:20 -05006673 return 0;
6674}
6675
6676/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006677 * ipr_qc_fill_rtf - Read result TF
6678 * @qc: ATA queued command
6679 *
6680 * Return value:
6681 * true
6682 **/
6683static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6684{
6685 struct ipr_sata_port *sata_port = qc->ap->private_data;
6686 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6687 struct ata_taskfile *tf = &qc->result_tf;
6688
6689 tf->feature = g->error;
6690 tf->nsect = g->nsect;
6691 tf->lbal = g->lbal;
6692 tf->lbam = g->lbam;
6693 tf->lbah = g->lbah;
6694 tf->device = g->device;
6695 tf->command = g->status;
6696 tf->hob_nsect = g->hob_nsect;
6697 tf->hob_lbal = g->hob_lbal;
6698 tf->hob_lbam = g->hob_lbam;
6699 tf->hob_lbah = g->hob_lbah;
6700 tf->ctl = g->alt_status;
6701
6702 return true;
6703}
6704
Brian King35a39692006-09-25 12:39:20 -05006705static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05006706 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09006707 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05006708 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05006709 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006710 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05006711 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006712 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05006713 .port_start = ata_sas_port_start,
6714 .port_stop = ata_sas_port_stop
6715};
6716
6717static struct ata_port_info sata_port_info = {
Sergei Shtylyov9cbe0562011-02-04 22:05:48 +03006718 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03006719 .pio_mask = ATA_PIO4_ONLY,
6720 .mwdma_mask = ATA_MWDMA2,
6721 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05006722 .port_ops = &ipr_sata_ops
6723};
6724
Linus Torvalds1da177e2005-04-16 15:20:36 -07006725#ifdef CONFIG_PPC_PSERIES
6726static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00006727 PVR_NORTHSTAR,
6728 PVR_PULSAR,
6729 PVR_POWER4,
6730 PVR_ICESTAR,
6731 PVR_SSTAR,
6732 PVR_POWER4p,
6733 PVR_630,
6734 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07006735};
6736
6737/**
6738 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6739 * @ioa_cfg: ioa cfg struct
6740 *
6741 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6742 * certain pSeries hardware. This function determines if the given
 6743 * adapter is in one of these configurations or not.
6744 *
6745 * Return value:
6746 * 1 if adapter is not supported / 0 if adapter is supported
6747 **/
6748static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6749{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006750 int i;
6751
Auke Kok44c10132007-06-08 15:46:36 -07006752 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006753 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00006754 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07006755 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006756 }
6757 }
6758 return 0;
6759}
6760#else
6761#define ipr_invalid_adapter(ioa_cfg) 0
6762#endif
6763
6764/**
6765 * ipr_ioa_bringdown_done - IOA bring down completion.
6766 * @ipr_cmd: ipr command struct
6767 *
6768 * This function processes the completion of an adapter bring down.
6769 * It wakes any reset sleepers.
6770 *
6771 * Return value:
6772 * IPR_RC_JOB_RETURN
6773 **/
6774static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6775{
6776 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6777
6778 ENTER;
6779 ioa_cfg->in_reset_reload = 0;
6780 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006781 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006782 wake_up_all(&ioa_cfg->reset_wait_q);
6783
6784 spin_unlock_irq(ioa_cfg->host->host_lock);
6785 scsi_unblock_requests(ioa_cfg->host);
6786 spin_lock_irq(ioa_cfg->host->host_lock);
6787 LEAVE;
6788
6789 return IPR_RC_JOB_RETURN;
6790}
6791
6792/**
6793 * ipr_ioa_reset_done - IOA reset completion.
6794 * @ipr_cmd: ipr command struct
6795 *
6796 * This function processes the completion of an adapter reset.
6797 * It schedules any necessary mid-layer add/removes and
6798 * wakes any reset sleepers.
6799 *
6800 * Return value:
6801 * IPR_RC_JOB_RETURN
6802 **/
6803static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6804{
6805 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6806 struct ipr_resource_entry *res;
6807 struct ipr_hostrcb *hostrcb, *temp;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006808 int i = 0, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006809
6810 ENTER;
6811 ioa_cfg->in_reset_reload = 0;
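	/* Re-enable command processing on every HRRQ now that the reset has completed. */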
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006812 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6813 spin_lock(&ioa_cfg->hrrq[j]._lock);
6814 ioa_cfg->hrrq[j].allow_cmds = 1;
6815 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6816 }
6817 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006818 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06006819 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006820
6821 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6822 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6823 ipr_trace;
6824 break;
6825 }
6826 }
6827 schedule_work(&ioa_cfg->work_q);
6828
6829 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6830 list_del(&hostrcb->queue);
6831 if (i++ < IPR_NUM_LOG_HCAMS)
6832 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6833 else
6834 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6835 }
6836
Brian King6bb04172007-04-26 16:00:08 -05006837 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006838 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6839
6840 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006841 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006842 wake_up_all(&ioa_cfg->reset_wait_q);
6843
Mark Nelson30237852008-12-10 12:23:20 +11006844 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006845 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11006846 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006847
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006848 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006849 scsi_block_requests(ioa_cfg->host);
6850
6851 LEAVE;
6852 return IPR_RC_JOB_RETURN;
6853}
6854
6855/**
6856 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6857 * @supported_dev: supported device struct
6858 * @vpids: vendor product id struct
6859 *
6860 * Return value:
6861 * none
6862 **/
6863static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6864 struct ipr_std_inq_vpids *vpids)
6865{
6866 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6867 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6868 supported_dev->num_records = 1;
6869 supported_dev->data_length =
6870 cpu_to_be16(sizeof(struct ipr_supported_device));
6871 supported_dev->reserved = 0;
6872}
6873
6874/**
6875 * ipr_set_supported_devs - Send Set Supported Devices for a device
6876 * @ipr_cmd: ipr command struct
6877 *
Wayne Boyera32c0552010-02-19 13:23:36 -08006878 * This function sends a Set Supported Devices to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07006879 *
6880 * Return value:
6881 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6882 **/
6883static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6884{
6885 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6886 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006887 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6888 struct ipr_resource_entry *res = ipr_cmd->u.res;
6889
6890 ipr_cmd->job_step = ipr_ioa_reset_done;
6891
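	/* Issue a Set Supported Devices for the SCSI disks on the used
	 * resource queue. Each pass sends one command and returns; on
	 * non-SIS64 adapters the job step re-enters this function so the
	 * remaining disks are handled one command at a time.
	 */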
6892 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06006893 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006894 continue;
6895
6896 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006897 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006898
6899 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6900 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6901 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6902
6903 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006904 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006905 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6906 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6907
Wayne Boyera32c0552010-02-19 13:23:36 -08006908 ipr_init_ioadl(ipr_cmd,
6909 ioa_cfg->vpd_cbs_dma +
6910 offsetof(struct ipr_misc_cbs, supp_dev),
6911 sizeof(struct ipr_supported_device),
6912 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006913
6914 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6915 IPR_SET_SUP_DEVICE_TIMEOUT);
6916
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006917 if (!ioa_cfg->sis64)
6918 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006919 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006920 return IPR_RC_JOB_RETURN;
6921 }
6922
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006923 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006924 return IPR_RC_JOB_CONTINUE;
6925}
6926
6927/**
6928 * ipr_get_mode_page - Locate specified mode page
6929 * @mode_pages: mode page buffer
6930 * @page_code: page code to find
6931 * @len: minimum required length for mode page
6932 *
6933 * Return value:
6934 * pointer to mode page / NULL on failure
6935 **/
6936static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6937 u32 page_code, u32 len)
6938{
6939 struct ipr_mode_page_hdr *mode_hdr;
6940 u32 page_length;
6941 u32 length;
6942
6943 if (!mode_pages || (mode_pages->hdr.length == 0))
6944 return NULL;
6945
6946 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6947 mode_hdr = (struct ipr_mode_page_hdr *)
6948 (mode_pages->data + mode_pages->hdr.block_desc_len);
6949
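	/* Step through the mode page headers that follow the block
	 * descriptors until the requested page code is found or the
	 * parameter data runs out.
	 */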
6950 while (length) {
6951 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6952 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6953 return mode_hdr;
6954 break;
6955 } else {
6956 page_length = (sizeof(struct ipr_mode_page_hdr) +
6957 mode_hdr->page_length);
6958 length -= page_length;
6959 mode_hdr = (struct ipr_mode_page_hdr *)
6960 ((unsigned long)mode_hdr + page_length);
6961 }
6962 }
6963 return NULL;
6964}
6965
6966/**
6967 * ipr_check_term_power - Check for term power errors
6968 * @ioa_cfg: ioa config struct
6969 * @mode_pages: IOAFP mode pages buffer
6970 *
6971 * Check the IOAFP's mode page 28 for term power errors
6972 *
6973 * Return value:
6974 * nothing
6975 **/
6976static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6977 struct ipr_mode_pages *mode_pages)
6978{
6979 int i;
6980 int entry_length;
6981 struct ipr_dev_bus_entry *bus;
6982 struct ipr_mode_page28 *mode_page;
6983
6984 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6985 sizeof(struct ipr_mode_page28));
6986
6987 entry_length = mode_page->entry_length;
6988
6989 bus = mode_page->bus;
6990
6991 for (i = 0; i < mode_page->num_entries; i++) {
6992 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6993 dev_err(&ioa_cfg->pdev->dev,
6994 "Term power is absent on scsi bus %d\n",
6995 bus->res_addr.bus);
6996 }
6997
6998 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6999 }
7000}
7001
7002/**
7003 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7004 * @ioa_cfg: ioa config struct
7005 *
7006 * Looks through the config table checking for SES devices. If
7007 * the SES device is in the SES table indicating a maximum SCSI
7008 * bus speed, the speed is limited for the bus.
7009 *
7010 * Return value:
7011 * none
7012 **/
7013static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7014{
7015 u32 max_xfer_rate;
7016 int i;
7017
7018 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7019 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7020 ioa_cfg->bus_attr[i].bus_width);
7021
7022 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7023 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7024 }
7025}
7026
7027/**
7028 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7029 * @ioa_cfg: ioa config struct
7030 * @mode_pages: mode page 28 buffer
7031 *
7032 * Updates mode page 28 based on driver configuration
7033 *
7034 * Return value:
7035 * none
7036 **/
7037static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007038 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007039{
7040 int i, entry_length;
7041 struct ipr_dev_bus_entry *bus;
7042 struct ipr_bus_attributes *bus_attr;
7043 struct ipr_mode_page28 *mode_page;
7044
7045 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7046 sizeof(struct ipr_mode_page28));
7047
7048 entry_length = mode_page->entry_length;
7049
7050 /* Loop for each device bus entry */
7051 for (i = 0, bus = mode_page->bus;
7052 i < mode_page->num_entries;
7053 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7054 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7055 dev_err(&ioa_cfg->pdev->dev,
7056 "Invalid resource address reported: 0x%08X\n",
7057 IPR_GET_PHYS_LOC(bus->res_addr));
7058 continue;
7059 }
7060
7061 bus_attr = &ioa_cfg->bus_attr[i];
7062 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7063 bus->bus_width = bus_attr->bus_width;
7064 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7065 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7066 if (bus_attr->qas_enabled)
7067 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7068 else
7069 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7070 }
7071}
7072
7073/**
7074 * ipr_build_mode_select - Build a mode select command
7075 * @ipr_cmd: ipr command struct
7076 * @res_handle: resource handle to send command to
7077 * @parm:	Byte 1 of the Mode Select command
7078 * @dma_addr: DMA buffer address
7079 * @xfer_len: data transfer length
7080 *
7081 * Return value:
7082 * none
7083 **/
7084static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08007085 __be32 res_handle, u8 parm,
7086 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007087{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007088 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7089
7090 ioarcb->res_handle = res_handle;
7091 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7092 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7093 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7094 ioarcb->cmd_pkt.cdb[1] = parm;
7095 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7096
Wayne Boyera32c0552010-02-19 13:23:36 -08007097 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007098}
7099
7100/**
7101 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7102 * @ipr_cmd: ipr command struct
7103 *
7104 * This function sets up the SCSI bus attributes and sends
7105 * a Mode Select for Page 28 to activate them.
7106 *
7107 * Return value:
7108 * IPR_RC_JOB_RETURN
7109 **/
7110static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7111{
7112 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7113 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7114 int length;
7115
7116 ENTER;
Brian King47338042006-02-08 20:57:42 -06007117 ipr_scsi_bus_speed_limit(ioa_cfg);
7118 ipr_check_term_power(ioa_cfg, mode_pages);
7119 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7120 length = mode_pages->hdr.length + 1;
7121 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007122
7123 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7124 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7125 length);
7126
Wayne Boyerf72919e2010-02-19 13:24:21 -08007127 ipr_cmd->job_step = ipr_set_supported_devs;
7128 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7129 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7131
7132 LEAVE;
7133 return IPR_RC_JOB_RETURN;
7134}
7135
7136/**
7137 * ipr_build_mode_sense - Builds a mode sense command
7138 * @ipr_cmd: ipr command struct
7139 * @res_handle:	resource handle to send command to
7140 * @parm: Byte 2 of mode sense command
7141 * @dma_addr: DMA address of mode sense buffer
7142 * @xfer_len: Size of DMA buffer
7143 *
7144 * Return value:
7145 * none
7146 **/
7147static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7148 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007149 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007150{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007151 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7152
7153 ioarcb->res_handle = res_handle;
7154 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7155 ioarcb->cmd_pkt.cdb[2] = parm;
7156 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7157 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7158
Wayne Boyera32c0552010-02-19 13:23:36 -08007159 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007160}
7161
7162/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007163 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7164 * @ipr_cmd: ipr command struct
7165 *
7166 * This function handles the failure of an IOA bringup command.
7167 *
7168 * Return value:
7169 * IPR_RC_JOB_RETURN
7170 **/
7171static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7172{
7173 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007174 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007175
7176 dev_err(&ioa_cfg->pdev->dev,
7177 "0x%02X failed with IOASC: 0x%08X\n",
7178 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7179
7180 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007181 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007182 return IPR_RC_JOB_RETURN;
7183}
7184
7185/**
7186 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7187 * @ipr_cmd: ipr command struct
7188 *
7189 * This function handles the failure of a Mode Sense to the IOAFP.
7190 * Some adapters do not handle all mode pages.
7191 *
7192 * Return value:
7193 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7194 **/
7195static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7196{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007197 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007198 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007199
7200 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007201 ipr_cmd->job_step = ipr_set_supported_devs;
7202 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7203 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007204 return IPR_RC_JOB_CONTINUE;
7205 }
7206
7207 return ipr_reset_cmd_failed(ipr_cmd);
7208}
7209
7210/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007211 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7212 * @ipr_cmd: ipr command struct
7213 *
7214 * This function sends a Page 28 mode sense to the IOA to
7215 * retrieve SCSI bus attributes.
7216 *
7217 * Return value:
7218 * IPR_RC_JOB_RETURN
7219 **/
7220static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7221{
7222 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7223
7224 ENTER;
7225 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7226 0x28, ioa_cfg->vpd_cbs_dma +
7227 offsetof(struct ipr_misc_cbs, mode_pages),
7228 sizeof(struct ipr_mode_pages));
7229
7230 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007231 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007232
7233 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7234
7235 LEAVE;
7236 return IPR_RC_JOB_RETURN;
7237}
7238
7239/**
Brian Kingac09c342007-04-26 16:00:16 -05007240 * ipr_ioafp_mode_select_page24 - Issue Mode Select Page 24 to IOA
7241 * @ipr_cmd: ipr command struct
7242 *
7243 * This function enables dual IOA RAID support if possible.
7244 *
7245 * Return value:
7246 * IPR_RC_JOB_RETURN
7247 **/
7248static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7249{
7250 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7251 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7252 struct ipr_mode_page24 *mode_page;
7253 int length;
7254
7255 ENTER;
7256 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7257 sizeof(struct ipr_mode_page24));
7258
7259 if (mode_page)
7260 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7261
7262 length = mode_pages->hdr.length + 1;
7263 mode_pages->hdr.length = 0;
7264
7265 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7266 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7267 length);
7268
7269 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7270 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7271
7272 LEAVE;
7273 return IPR_RC_JOB_RETURN;
7274}
7275
7276/**
7277 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7278 * @ipr_cmd: ipr command struct
7279 *
7280 * This function handles the failure of a Mode Sense to the IOAFP.
7281 * Some adapters do not handle all mode pages.
7282 *
7283 * Return value:
7284 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7285 **/
7286static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7287{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007288 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007289
7290 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7291 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7292 return IPR_RC_JOB_CONTINUE;
7293 }
7294
7295 return ipr_reset_cmd_failed(ipr_cmd);
7296}
7297
7298/**
7299 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7300 * @ipr_cmd: ipr command struct
7301 *
7302 * This function sends a mode sense to the IOA to retrieve
7303 * the IOA Advanced Function Control mode page.
7304 *
7305 * Return value:
7306 * IPR_RC_JOB_RETURN
7307 **/
7308static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7309{
7310 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7311
7312 ENTER;
7313 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7314 0x24, ioa_cfg->vpd_cbs_dma +
7315 offsetof(struct ipr_misc_cbs, mode_pages),
7316 sizeof(struct ipr_mode_pages));
7317
7318 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7319 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7320
7321 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7322
7323 LEAVE;
7324 return IPR_RC_JOB_RETURN;
7325}
7326
7327/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007328 * ipr_init_res_table - Initialize the resource table
7329 * @ipr_cmd: ipr command struct
7330 *
7331 * This function looks through the existing resource table, comparing
7332 * it with the config table. It will take care of old/new
7333 * devices and schedule adding/removing them from the mid-layer
7334 * as appropriate.
7335 *
7336 * Return value:
7337 * IPR_RC_JOB_CONTINUE
7338 **/
7339static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7340{
7341 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7342 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007343 struct ipr_config_table_entry_wrapper cfgtew;
7344 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007345 LIST_HEAD(old_res);
7346
7347 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007348 if (ioa_cfg->sis64)
7349 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7350 else
7351 flag = ioa_cfg->u.cfg_table->hdr.flags;
7352
7353 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007354 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7355
7356 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7357 list_move_tail(&res->queue, &old_res);
7358
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007359 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007360 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007361 else
7362 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7363
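	/* Match each entry of the newly fetched config table against the
	 * resources saved on old_res. Matches move back to the used queue;
	 * new devices take a free resource entry and are flagged for a
	 * mid-layer add.
	 */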
7364 for (i = 0; i < entries; i++) {
7365 if (ioa_cfg->sis64)
7366 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7367 else
7368 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007369 found = 0;
7370
7371 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007372 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007373 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7374 found = 1;
7375 break;
7376 }
7377 }
7378
7379 if (!found) {
7380 if (list_empty(&ioa_cfg->free_res_q)) {
7381 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7382 break;
7383 }
7384
7385 found = 1;
7386 res = list_entry(ioa_cfg->free_res_q.next,
7387 struct ipr_resource_entry, queue);
7388 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007389 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007390 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007391 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7392 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007393
7394 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007395 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007396 }
7397
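	/* Anything left on old_res was not reported in the new config table.
	 * Resources with an attached sdev are flagged for removal from the
	 * mid-layer; the rest are returned to the free queue.
	 */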
7398 list_for_each_entry_safe(res, temp, &old_res, queue) {
7399 if (res->sdev) {
7400 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007401 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007402 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007403 }
7404 }
7405
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007406 list_for_each_entry_safe(res, temp, &old_res, queue) {
7407 ipr_clear_res_target(res);
7408 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7409 }
7410
Brian Kingac09c342007-04-26 16:00:16 -05007411 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7412 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7413 else
7414 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007415
7416 LEAVE;
7417 return IPR_RC_JOB_CONTINUE;
7418}
7419
7420/**
7421 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7422 * @ipr_cmd: ipr command struct
7423 *
7424 * This function sends a Query IOA Configuration command
7425 * to the adapter to retrieve the IOA configuration table.
7426 *
7427 * Return value:
7428 * IPR_RC_JOB_RETURN
7429 **/
7430static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7431{
7432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7433 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007434 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007435 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007436
7437 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007438 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7439 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007440 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7441 ucode_vpd->major_release, ucode_vpd->card_type,
7442 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7443 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7444 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7445
7446 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07007447 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007448 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7449 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007450
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007451 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007452 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007453
7454 ipr_cmd->job_step = ipr_init_res_table;
7455
7456 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7457
7458 LEAVE;
7459 return IPR_RC_JOB_RETURN;
7460}
7461
7462/**
7463 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7464 * @ipr_cmd: ipr command struct
7465 *
7466 * This utility function sends an inquiry to the adapter.
7467 *
7468 * Return value:
7469 * none
7470 **/
7471static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007472 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007473{
7474 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007475
7476 ENTER;
7477 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7478 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7479
7480 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7481 ioarcb->cmd_pkt.cdb[1] = flags;
7482 ioarcb->cmd_pkt.cdb[2] = page;
7483 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7484
Wayne Boyera32c0552010-02-19 13:23:36 -08007485 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007486
7487 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7488 LEAVE;
7489}
7490
7491/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06007492 * ipr_inquiry_page_supported - Is the given inquiry page supported
7493 * @page0: inquiry page 0 buffer
7494 * @page: page code.
7495 *
7496 * This function determines if the specified inquiry page is supported.
7497 *
7498 * Return value:
7499 * 1 if page is supported / 0 if not
7500 **/
7501static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7502{
7503 int i;
7504
7505 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7506 if (page0->page[i] == page)
7507 return 1;
7508
7509 return 0;
7510}
7511
7512/**
Brian Kingac09c342007-04-26 16:00:16 -05007513 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7514 * @ipr_cmd: ipr command struct
7515 *
7516 * This function sends a Page 0xD0 inquiry to the adapter
7517 * to retrieve adapter capabilities.
7518 *
7519 * Return value:
7520 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7521 **/
7522static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7523{
7524 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7525 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7526 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7527
7528 ENTER;
7529 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7530 memset(cap, 0, sizeof(*cap));
7531
7532 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7533 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7534 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7535 sizeof(struct ipr_inquiry_cap));
7536 return IPR_RC_JOB_RETURN;
7537 }
7538
7539 LEAVE;
7540 return IPR_RC_JOB_CONTINUE;
7541}
7542
7543/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007544 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7545 * @ipr_cmd: ipr command struct
7546 *
7547 * This function sends a Page 3 inquiry to the adapter
7548 * to retrieve software VPD information.
7549 *
7550 * Return value:
7551 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7552 **/
7553static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7554{
7555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007556
7557 ENTER;
7558
Brian Kingac09c342007-04-26 16:00:16 -05007559 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007560
7561 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7562 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7563 sizeof(struct ipr_inquiry_page3));
7564
7565 LEAVE;
7566 return IPR_RC_JOB_RETURN;
7567}
7568
7569/**
7570 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7571 * @ipr_cmd: ipr command struct
7572 *
7573 * This function sends a Page 0 inquiry to the adapter
7574 * to retrieve supported inquiry pages.
7575 *
7576 * Return value:
7577 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7578 **/
7579static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7580{
7581 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007582 char type[5];
7583
7584 ENTER;
7585
7586 /* Grab the type out of the VPD and store it away */
7587 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7588 type[4] = '\0';
7589 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7590
brking@us.ibm.com62275042005-11-01 17:01:14 -06007591 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007592
brking@us.ibm.com62275042005-11-01 17:01:14 -06007593 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7594 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7595 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007596
7597 LEAVE;
7598 return IPR_RC_JOB_RETURN;
7599}
7600
7601/**
7602 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7603 * @ipr_cmd: ipr command struct
7604 *
7605 * This function sends a standard inquiry to the adapter.
7606 *
7607 * Return value:
7608 * IPR_RC_JOB_RETURN
7609 **/
7610static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7611{
7612 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7613
7614 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06007615 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007616
7617 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7618 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7619 sizeof(struct ipr_ioa_vpd));
7620
7621 LEAVE;
7622 return IPR_RC_JOB_RETURN;
7623}
7624
7625/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007626 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007627 * @ipr_cmd: ipr command struct
7628 *
7629 * This function sends an Identify Host Request Response Queue
7630 * command to establish the HRRQ with the adapter.
7631 *
7632 * Return value:
7633 * IPR_RC_JOB_RETURN
7634 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08007635static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007636{
7637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7638 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007639 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007640
7641 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007642 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007643 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7644
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007645 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7646 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007647
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007648 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7649 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007650
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007651 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7652 if (ioa_cfg->sis64)
7653 ioarcb->cmd_pkt.cdb[1] = 0x1;
7654
7655 if (ioa_cfg->nvectors == 1)
7656 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7657 else
7658 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7659
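		/* The host RRQ DMA address and size are passed in the CDB:
		 * bytes 2-5 carry the low 32 address bits, bytes 10-13 the
		 * upper bits on SIS64, and bytes 7-8 the queue length. When
		 * multiple HRRQs are enabled, the queue index is supplied
		 * as well.
		 */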
7660 ioarcb->cmd_pkt.cdb[2] =
7661 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7662 ioarcb->cmd_pkt.cdb[3] =
7663 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7664 ioarcb->cmd_pkt.cdb[4] =
7665 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7666 ioarcb->cmd_pkt.cdb[5] =
7667 ((u64) hrrq->host_rrq_dma) & 0xff;
7668 ioarcb->cmd_pkt.cdb[7] =
7669 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7670 ioarcb->cmd_pkt.cdb[8] =
7671 (sizeof(u32) * hrrq->size) & 0xff;
7672
7673 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007674 ioarcb->cmd_pkt.cdb[9] =
7675 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007676
7677 if (ioa_cfg->sis64) {
7678 ioarcb->cmd_pkt.cdb[10] =
7679 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7680 ioarcb->cmd_pkt.cdb[11] =
7681 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7682 ioarcb->cmd_pkt.cdb[12] =
7683 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7684 ioarcb->cmd_pkt.cdb[13] =
7685 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7686 }
7687
7688 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007689 ioarcb->cmd_pkt.cdb[14] =
7690 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007691
7692 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7693 IPR_INTERNAL_TIMEOUT);
7694
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007695 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7696 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007697
7698 LEAVE;
7699 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08007700 }
7701
Linus Torvalds1da177e2005-04-16 15:20:36 -07007702 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007703 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007704}
7705
7706/**
7707 * ipr_reset_timer_done - Adapter reset timer function
7708 * @ipr_cmd: ipr command struct
7709 *
7710 * Description: This function is used in adapter reset processing
7711 * for timing events. If the reset_cmd pointer in the IOA
7712 * config struct is not this adapter's we are doing nested
7713 * resets and fail_all_ops will take care of freeing the
7714 * command block.
7715 *
7716 * Return value:
7717 * none
7718 **/
7719static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7720{
7721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7722 unsigned long lock_flags = 0;
7723
7724 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7725
7726 if (ioa_cfg->reset_cmd == ipr_cmd) {
7727 list_del(&ipr_cmd->queue);
7728 ipr_cmd->done(ipr_cmd);
7729 }
7730
7731 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7732}
7733
7734/**
7735 * ipr_reset_start_timer - Start a timer for adapter reset job
7736 * @ipr_cmd: ipr command struct
7737 * @timeout: timeout value
7738 *
7739 * Description: This function is used in adapter reset processing
7740 * for timing events. If the reset_cmd pointer in the IOA
7741 * config struct is not this adapter's we are doing nested
7742 * resets and fail_all_ops will take care of freeing the
7743 * command block.
7744 *
7745 * Return value:
7746 * none
7747 **/
7748static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7749 unsigned long timeout)
7750{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007751
7752 ENTER;
7753 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007754 ipr_cmd->done = ipr_reset_ioa_job;
7755
7756 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7757 ipr_cmd->timer.expires = jiffies + timeout;
7758 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7759 add_timer(&ipr_cmd->timer);
7760}
7761
7762/**
7763 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7764 * @ioa_cfg: ioa cfg struct
7765 *
7766 * Return value:
7767 * nothing
7768 **/
7769static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7770{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007771 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007772
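	/* Reset every host RRQ to its initial state: clear the queue memory
	 * and point the start/end/current pointers back at the beginning
	 * with the toggle bit set.
	 */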
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007773 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007774 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007775 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7776
7777 /* Initialize Host RRQ pointers */
7778 hrrq->hrrq_start = hrrq->host_rrq;
7779 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7780 hrrq->hrrq_curr = hrrq->hrrq_start;
7781 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007782 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007783 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007784 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007785
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007786 ioa_cfg->identify_hrrq_index = 0;
7787 if (ioa_cfg->hrrq_num == 1)
7788 atomic_set(&ioa_cfg->hrrq_index, 0);
7789 else
7790 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007791
7792 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007793 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007794}
7795
7796/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007797 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7798 * @ipr_cmd: ipr command struct
7799 *
7800 * Return value:
7801 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7802 **/
7803static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7804{
7805 unsigned long stage, stage_time;
7806 u32 feedback;
7807 volatile u32 int_reg;
7808 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7809 u64 maskval = 0;
7810
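	/* The init feedback register reports the current IPL stage and how
	 * long the adapter expects that stage to take; use it to size the
	 * timeout for the next wait.
	 */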
7811 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7812 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7813 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7814
7815 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7816
7817 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07007818 if (stage_time == 0)
7819 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7820 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08007821 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7822 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7823 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7824
7825 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7826 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7827 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7828 stage_time = ioa_cfg->transop_timeout;
7829 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7830 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07007831 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7832 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7833 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7834 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7835 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7836 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7837 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7838 return IPR_RC_JOB_CONTINUE;
7839 }
Wayne Boyer214777b2010-02-19 13:24:26 -08007840 }
7841
7842 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7843 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7844 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7845 ipr_cmd->done = ipr_reset_ioa_job;
7846 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007847
7848 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08007849
7850 return IPR_RC_JOB_RETURN;
7851}
7852
7853/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007854 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7855 * @ipr_cmd: ipr command struct
7856 *
7857 * This function reinitializes some control blocks and
7858 * enables destructive diagnostics on the adapter.
7859 *
7860 * Return value:
7861 * IPR_RC_JOB_RETURN
7862 **/
7863static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7864{
7865 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7866 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07007867 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007868 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007869
7870 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08007871 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007872 ipr_init_ioa_mem(ioa_cfg);
7873
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007874 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7875 spin_lock(&ioa_cfg->hrrq[i]._lock);
7876 ioa_cfg->hrrq[i].allow_interrupts = 1;
7877 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7878 }
7879 wmb();
Wayne Boyer8701f182010-06-04 10:26:50 -07007880 if (ioa_cfg->sis64) {
7881 /* Set the adapter to the correct endian mode. */
7882 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7883 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7884 }
7885
Wayne Boyer7be96902010-05-10 09:14:07 -07007886 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007887
7888 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7889 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08007890 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007891 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7892 return IPR_RC_JOB_CONTINUE;
7893 }
7894
7895 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08007896 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007897
Wayne Boyer7be96902010-05-10 09:14:07 -07007898 if (ioa_cfg->sis64) {
7899 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7900 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7901 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7902 } else
7903 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08007904
Linus Torvalds1da177e2005-04-16 15:20:36 -07007905 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7906
7907 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7908
Wayne Boyer214777b2010-02-19 13:24:26 -08007909 if (ioa_cfg->sis64) {
7910 ipr_cmd->job_step = ipr_reset_next_stage;
7911 return IPR_RC_JOB_CONTINUE;
7912 }
7913
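	/* Older (non-SIS64) adapters do not report IPL stages; simply arm the
	 * operational timeout and wait for the transition-to-operational
	 * interrupt.
	 */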
Linus Torvalds1da177e2005-04-16 15:20:36 -07007914 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
Brian King5469cb52007-03-29 12:42:40 -05007915 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007916 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7917 ipr_cmd->done = ipr_reset_ioa_job;
7918 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007919 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007920
7921 LEAVE;
7922 return IPR_RC_JOB_RETURN;
7923}
7924
7925/**
7926 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7927 * @ipr_cmd: ipr command struct
7928 *
7929 * This function is invoked when an adapter dump has run out
7930 * of processing time.
7931 *
7932 * Return value:
7933 * IPR_RC_JOB_CONTINUE
7934 **/
7935static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7936{
7937 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7938
7939 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05007940 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7941 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007942 ioa_cfg->sdt_state = ABORT_DUMP;
7943
Brian King4c647e92011-10-15 09:08:56 -05007944 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007945 ipr_cmd->job_step = ipr_reset_alert;
7946
7947 return IPR_RC_JOB_CONTINUE;
7948}
7949
7950/**
7951 * ipr_unit_check_no_data - Log a unit check/no data error log
7952 * @ioa_cfg: ioa config struct
7953 *
7954 * Logs an error indicating the adapter unit checked, but for some
7955 * reason, we were unable to fetch the unit check buffer.
7956 *
7957 * Return value:
7958 * nothing
7959 **/
7960static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7961{
7962 ioa_cfg->errors_logged++;
7963 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7964}
7965
7966/**
7967 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7968 * @ioa_cfg: ioa config struct
7969 *
7970 * Fetches the unit check buffer from the adapter by clocking the data
7971 * through the mailbox register.
7972 *
7973 * Return value:
7974 * nothing
7975 **/
7976static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7977{
7978 unsigned long mailbox;
7979 struct ipr_hostrcb *hostrcb;
7980 struct ipr_uc_sdt sdt;
7981 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05007982 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007983
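	/* The mailbox register points at a small dump table (SDT) on the
	 * adapter; its first entry describes the unit check buffer, which is
	 * clocked out below and logged as a host RCB.
	 */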
7984 mailbox = readl(ioa_cfg->ioa_mailbox);
7985
Wayne Boyerdcbad002010-02-19 13:24:14 -08007986 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007987 ipr_unit_check_no_data(ioa_cfg);
7988 return;
7989 }
7990
7991 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7992 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7993 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7994
Wayne Boyerdcbad002010-02-19 13:24:14 -08007995 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7996 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7997 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007998 ipr_unit_check_no_data(ioa_cfg);
7999 return;
8000 }
8001
8002 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08008003 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8004 length = be32_to_cpu(sdt.entry[0].end_token);
8005 else
8006 length = (be32_to_cpu(sdt.entry[0].end_token) -
8007 be32_to_cpu(sdt.entry[0].start_token)) &
8008 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008009
8010 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8011 struct ipr_hostrcb, queue);
8012 list_del(&hostrcb->queue);
8013 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8014
8015 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08008016 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07008017 (__be32 *)&hostrcb->hcam,
8018 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8019
Brian King65f56472007-04-26 16:00:12 -05008020 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008021 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08008022 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05008023 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8024 ioa_cfg->sdt_state == GET_DUMP)
8025 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8026 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008027 ipr_unit_check_no_data(ioa_cfg);
8028
8029 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8030}
8031
8032/**
Wayne Boyer110def82010-11-04 09:36:16 -07008033 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8034 * @ipr_cmd: ipr command struct
8035 *
8036 * Description: This function will call to get the unit check buffer.
8037 *
8038 * Return value:
8039 * IPR_RC_JOB_RETURN
8040 **/
8041static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8042{
8043 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8044
8045 ENTER;
8046 ioa_cfg->ioa_unit_checked = 0;
8047 ipr_get_unit_check_buffer(ioa_cfg);
8048 ipr_cmd->job_step = ipr_reset_alert;
8049 ipr_reset_start_timer(ipr_cmd, 0);
8050
8051 LEAVE;
8052 return IPR_RC_JOB_RETURN;
8053}
8054
8055/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008056 * ipr_reset_restore_cfg_space - Restore PCI config space.
8057 * @ipr_cmd: ipr command struct
8058 *
8059 * Description: This function restores the saved PCI config space of
8060 * the adapter, fails all outstanding ops back to the callers, and
8061 * fetches the dump/unit check if applicable to this reset.
8062 *
8063 * Return value:
8064 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8065 **/
8066static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8067{
8068 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07008069 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008070
8071 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02008072 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06008073 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008074
8075 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008076 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008077 return IPR_RC_JOB_CONTINUE;
8078 }
8079
8080 ipr_fail_all_ops(ioa_cfg);
8081
Wayne Boyer8701f182010-06-04 10:26:50 -07008082 if (ioa_cfg->sis64) {
8083 /* Set the adapter to the correct endian mode. */
8084 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8085 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8086 }
8087
Linus Torvalds1da177e2005-04-16 15:20:36 -07008088 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07008089 if (ioa_cfg->sis64) {
8090 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8091 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8092 return IPR_RC_JOB_RETURN;
8093 } else {
8094 ioa_cfg->ioa_unit_checked = 0;
8095 ipr_get_unit_check_buffer(ioa_cfg);
8096 ipr_cmd->job_step = ipr_reset_alert;
8097 ipr_reset_start_timer(ipr_cmd, 0);
8098 return IPR_RC_JOB_RETURN;
8099 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008100 }
8101
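	/* If this reset is part of bringing the adapter down, finish the
	 * bringdown; otherwise continue the bringup, fetching an adapter
	 * dump first if one was requested.
	 */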
8102 if (ioa_cfg->in_ioa_bringdown) {
8103 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8104 } else {
8105 ipr_cmd->job_step = ipr_reset_enable_ioa;
8106
8107 if (GET_DUMP == ioa_cfg->sdt_state) {
Brian King41e9a692011-09-21 08:51:11 -05008108 ioa_cfg->sdt_state = READ_DUMP;
Brian King4c647e92011-10-15 09:08:56 -05008109 ioa_cfg->dump_timeout = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03008110 if (ioa_cfg->sis64)
8111 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8112 else
8113 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008114 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8115 schedule_work(&ioa_cfg->work_q);
8116 return IPR_RC_JOB_RETURN;
8117 }
8118 }
8119
Wayne Boyer438b0332010-05-10 09:13:00 -07008120 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008121 return IPR_RC_JOB_CONTINUE;
8122}
8123
8124/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008125 * ipr_reset_bist_done - BIST has completed on the adapter.
8126 * @ipr_cmd: ipr command struct
8127 *
8128 * Description: Unblock config space and resume the reset process.
8129 *
8130 * Return value:
8131 * IPR_RC_JOB_CONTINUE
8132 **/
8133static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8134{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008135 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8136
Brian Kinge619e1a2007-01-23 11:25:37 -06008137 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008138 if (ioa_cfg->cfg_locked)
8139 pci_cfg_access_unlock(ioa_cfg->pdev);
8140 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008141 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8142 LEAVE;
8143 return IPR_RC_JOB_CONTINUE;
8144}
8145
8146/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008147 * ipr_reset_start_bist - Run BIST on the adapter.
8148 * @ipr_cmd: ipr command struct
8149 *
8150 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8151 *
8152 * Return value:
8153 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8154 **/
8155static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8156{
8157 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008158 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008159
8160 ENTER;
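	/* Newer chips start BIST through an MMIO uproc interrupt register;
	 * older ones use the standard PCI BIST register in config space.
	 */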
Wayne Boyercb237ef2010-06-17 11:51:40 -07008161 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8162 writel(IPR_UPROCI_SIS64_START_BIST,
8163 ioa_cfg->regs.set_uproc_interrupt_reg32);
8164 else
8165 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8166
8167 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008168 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008169 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8170 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008171 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008172 if (ioa_cfg->cfg_locked)
8173 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8174 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008175 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8176 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008177 }
8178
8179 LEAVE;
8180 return rc;
8181}
8182
8183/**
Brian King463fc692007-05-07 17:09:05 -05008184 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8185 * @ipr_cmd: ipr command struct
8186 *
8187 * Description: This clears PCI reset to the adapter and delays two seconds.
8188 *
8189 * Return value:
8190 * IPR_RC_JOB_RETURN
8191 **/
8192static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8193{
8194 ENTER;
8195 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8196 ipr_cmd->job_step = ipr_reset_bist_done;
8197 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8198 LEAVE;
8199 return IPR_RC_JOB_RETURN;
8200}
8201
8202/**
8203 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8204 * @ipr_cmd: ipr command struct
8205 *
8206 * Description: This asserts PCI reset to the adapter.
8207 *
8208 * Return value:
8209 * IPR_RC_JOB_RETURN
8210 **/
8211static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8212{
8213 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8214 struct pci_dev *pdev = ioa_cfg->pdev;
8215
8216 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008217 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8218 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8219 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8220 LEAVE;
8221 return IPR_RC_JOB_RETURN;
8222}
8223
8224/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008225 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8226 * @ipr_cmd: ipr command struct
8227 *
8228 * Description: This attempts to block config access to the IOA.
8229 *
8230 * Return value:
8231 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8232 **/
8233static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8234{
8235 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8236 int rc = IPR_RC_JOB_CONTINUE;
8237
8238 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8239 ioa_cfg->cfg_locked = 1;
8240 ipr_cmd->job_step = ioa_cfg->reset;
8241 } else {
8242 if (ipr_cmd->u.time_left) {
8243 rc = IPR_RC_JOB_RETURN;
8244 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8245 ipr_reset_start_timer(ipr_cmd,
8246 IPR_CHECK_FOR_RESET_TIMEOUT);
8247 } else {
8248 ipr_cmd->job_step = ioa_cfg->reset;
8249 dev_err(&ioa_cfg->pdev->dev,
8250 "Timed out waiting to lock config access. Resetting anyway.\n");
8251 }
8252 }
8253
8254 return rc;
8255}
8256
8257/**
8258 * ipr_reset_block_config_access - Block config access to the IOA
8259 * @ipr_cmd: ipr command struct
8260 *
8261 * Description: This attempts to block config access to the IOA
8262 *
8263 * Return value:
8264 * IPR_RC_JOB_CONTINUE
8265 **/
8266static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8267{
8268 ipr_cmd->ioa_cfg->cfg_locked = 0;
8269 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8270 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8271 return IPR_RC_JOB_CONTINUE;
8272}
8273
8274/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008275 * ipr_reset_allowed - Query whether or not IOA can be reset
8276 * @ioa_cfg: ioa config struct
8277 *
8278 * Return value:
8279 * 0 if reset not allowed / non-zero if reset is allowed
8280 **/
8281static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8282{
8283 volatile u32 temp_reg;
8284
8285 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8286 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8287}
8288
8289/**
8290 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8291 * @ipr_cmd: ipr command struct
8292 *
8293 * Description: This function waits for adapter permission to run BIST,
8294 * then runs BIST. If the adapter does not give permission after a
8295 * reasonable time, we will reset the adapter anyway. The impact of
8296 * resetting the adapter without warning the adapter is the risk of
8297 * losing the persistent error log on the adapter. If the adapter is
8298 * reset while it is writing to the flash on the adapter, the flash
8299 * segment will have bad ECC and be zeroed.
8300 *
8301 * Return value:
8302 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8303 **/
8304static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8305{
8306 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8307 int rc = IPR_RC_JOB_RETURN;
8308
8309 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8310 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8311 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8312 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008313 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008314 rc = IPR_RC_JOB_CONTINUE;
8315 }
8316
8317 return rc;
8318}
8319
8320/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008321 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008322 * @ipr_cmd: ipr command struct
8323 *
8324 * Description: This function alerts the adapter that it will be reset.
8325 * If memory space is not currently enabled, proceed directly
8326 * to running BIST on the adapter. The timer must always be started
8327 * so we guarantee we do not run BIST from ipr_isr.
8328 *
8329 * Return value:
8330 * IPR_RC_JOB_RETURN
8331 **/
8332static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8333{
8334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8335 u16 cmd_reg;
8336 int rc;
8337
8338 ENTER;
8339 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8340
8341 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8342 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008343 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008344 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8345 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008346 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008347 }
8348
8349 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8350 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8351
8352 LEAVE;
8353 return IPR_RC_JOB_RETURN;
8354}
8355
8356/**
8357 * ipr_reset_ucode_download_done - Microcode download completion
8358 * @ipr_cmd: ipr command struct
8359 *
8360 * Description: This function unmaps the microcode download buffer.
8361 *
8362 * Return value:
8363 * IPR_RC_JOB_CONTINUE
8364 **/
8365static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8366{
8367 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8368 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8369
8370 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8371 sglist->num_sg, DMA_TO_DEVICE);
8372
8373 ipr_cmd->job_step = ipr_reset_alert;
8374 return IPR_RC_JOB_CONTINUE;
8375}
8376
8377/**
8378 * ipr_reset_ucode_download - Download microcode to the adapter
8379 * @ipr_cmd: ipr command struct
8380 *
8381 * Description: This function checks to see if there is microcode
8382 * to download to the adapter. If there is, a download is performed.
8383 *
8384 * Return value:
8385 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8386 **/
8387static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8388{
8389 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8390 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8391
8392 ENTER;
8393 ipr_cmd->job_step = ipr_reset_alert;
8394
8395 if (!sglist)
8396 return IPR_RC_JOB_CONTINUE;
8397
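	/* Build a SCSI WRITE BUFFER (download microcode and save) command;
	 * CDB bytes 6-8 carry the 24-bit image length, most significant
	 * byte first.
	 */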
8398 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8399 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8400 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8401 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8402 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8403 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8404 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8405
Wayne Boyera32c0552010-02-19 13:23:36 -08008406 if (ioa_cfg->sis64)
8407 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8408 else
8409 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008410 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8411
8412 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8413 IPR_WRITE_BUFFER_TIMEOUT);
8414
8415 LEAVE;
8416 return IPR_RC_JOB_RETURN;
8417}
8418
8419/**
8420 * ipr_reset_shutdown_ioa - Shutdown the adapter
8421 * @ipr_cmd: ipr command struct
8422 *
8423 * Description: This function issues an adapter shutdown of the
8424 * specified type to the specified adapter as part of the
8425 * adapter reset job.
8426 *
8427 * Return value:
8428 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8429 **/
8430static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8431{
8432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8433 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8434 unsigned long timeout;
8435 int rc = IPR_RC_JOB_CONTINUE;
8436
8437 ENTER;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008438 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8439 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008440 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8441 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8442 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8443 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8444
Brian Kingac09c342007-04-26 16:00:16 -05008445 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8446 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008447 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8448 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05008449 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8450 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008451 else
Brian Kingac09c342007-04-26 16:00:16 -05008452 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008453
8454 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8455
8456 rc = IPR_RC_JOB_RETURN;
8457 ipr_cmd->job_step = ipr_reset_ucode_download;
8458 } else
8459 ipr_cmd->job_step = ipr_reset_alert;
8460
8461 LEAVE;
8462 return rc;
8463}
8464
8465/**
8466 * ipr_reset_ioa_job - Adapter reset job
8467 * @ipr_cmd: ipr command struct
8468 *
8469 * Description: This function is the job router for the adapter reset job.
8470 *
8471 * Return value:
8472 * none
8473 **/
8474static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8475{
8476 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008477 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8478
8479 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008480 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008481
8482 if (ioa_cfg->reset_cmd != ipr_cmd) {
8483 /*
8484 * We are doing nested adapter resets and this is
8485 * not the current reset job.
8486 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008487 list_add_tail(&ipr_cmd->queue,
8488 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008489 return;
8490 }
8491
8492 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06008493 rc = ipr_cmd->job_step_failed(ipr_cmd);
8494 if (rc == IPR_RC_JOB_RETURN)
8495 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008496 }
8497
8498 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06008499 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008500 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008501 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008502}
8503
8504/**
8505 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8506 * @ioa_cfg: ioa config struct
8507 * @job_step: first job step of reset job
8508 * @shutdown_type: shutdown type
8509 *
8510 * Description: This function will initiate the reset of the given adapter
8511 * starting at the selected job step.
8512 * If the caller needs to wait on the completion of the reset,
8513 * the caller must sleep on the reset_wait_q.
8514 *
8515 * Return value:
8516 * none
8517 **/
8518static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8519 int (*job_step) (struct ipr_cmnd *),
8520 enum ipr_shutdown_type shutdown_type)
8521{
8522 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008523 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008524
8525 ioa_cfg->in_reset_reload = 1;
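	/* Stop all HRRQs from accepting new commands before starting the
	 * reset job; the write barrier publishes allow_cmds before host
	 * requests are blocked below.
	 */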
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008526 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8527 spin_lock(&ioa_cfg->hrrq[i]._lock);
8528 ioa_cfg->hrrq[i].allow_cmds = 0;
8529 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8530 }
8531 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008532 scsi_block_requests(ioa_cfg->host);
8533
8534 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8535 ioa_cfg->reset_cmd = ipr_cmd;
8536 ipr_cmd->job_step = job_step;
8537 ipr_cmd->u.shutdown_type = shutdown_type;
8538
8539 ipr_reset_ioa_job(ipr_cmd);
8540}
8541
8542/**
8543 * ipr_initiate_ioa_reset - Initiate an adapter reset
8544 * @ioa_cfg: ioa config struct
8545 * @shutdown_type: shutdown type
8546 *
8547 * Description: This function will initiate the reset of the given adapter.
8548 * If the caller needs to wait on the completion of the reset,
8549 * the caller must sleep on the reset_wait_q.
8550 *
8551 * Return value:
8552 * none
8553 **/
8554static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8555 enum ipr_shutdown_type shutdown_type)
8556{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008557 int i;
8558
8559 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008560 return;
8561
Brian King41e9a692011-09-21 08:51:11 -05008562 if (ioa_cfg->in_reset_reload) {
8563 if (ioa_cfg->sdt_state == GET_DUMP)
8564 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8565 else if (ioa_cfg->sdt_state == READ_DUMP)
8566 ioa_cfg->sdt_state = ABORT_DUMP;
8567 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008568
8569 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8570 dev_err(&ioa_cfg->pdev->dev,
8571 "IOA taken offline - error recovery failed\n");
8572
8573 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008574 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8575 spin_lock(&ioa_cfg->hrrq[i]._lock);
8576 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8577 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8578 }
8579 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008580
8581 if (ioa_cfg->in_ioa_bringdown) {
8582 ioa_cfg->reset_cmd = NULL;
8583 ioa_cfg->in_reset_reload = 0;
8584 ipr_fail_all_ops(ioa_cfg);
8585 wake_up_all(&ioa_cfg->reset_wait_q);
8586
8587 spin_unlock_irq(ioa_cfg->host->host_lock);
8588 scsi_unblock_requests(ioa_cfg->host);
8589 spin_lock_irq(ioa_cfg->host->host_lock);
8590 return;
8591 } else {
8592 ioa_cfg->in_ioa_bringdown = 1;
8593 shutdown_type = IPR_SHUTDOWN_NONE;
8594 }
8595 }
8596
8597 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8598 shutdown_type);
8599}
8600
8601/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008602 * ipr_reset_freeze - Hold off all I/O activity
8603 * @ipr_cmd: ipr command struct
8604 *
8605 * Description: If the PCI slot is frozen, hold off all I/O
8606 * activity; then, as soon as the slot is available again,
8607 * initiate an adapter reset.
8608 */
8609static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8610{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008611 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8612 int i;
8613
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008614 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008615 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8616 spin_lock(&ioa_cfg->hrrq[i]._lock);
8617 ioa_cfg->hrrq[i].allow_interrupts = 0;
8618 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8619 }
8620 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008621 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008622 ipr_cmd->done = ipr_reset_ioa_job;
8623 return IPR_RC_JOB_RETURN;
8624}
8625
8626/**
8627 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8628 * @pdev: PCI device struct
8629 *
8630 * Description: This routine is called to tell us that the PCI bus
8631 * is down. Can't do anything here, except put the device driver
8632 * into a holding pattern, waiting for the PCI bus to come back.
8633 */
8634static void ipr_pci_frozen(struct pci_dev *pdev)
8635{
8636 unsigned long flags = 0;
8637 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8638
8639 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8640 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8641 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8642}
8643
8644/**
8645 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8646 * @pdev: PCI device struct
8647 *
8648 * Description: This routine is called by the pci error recovery
8649 * code after the PCI slot has been reset, just before we
8650 * should resume normal operations.
8651 */
8652static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8653{
8654 unsigned long flags = 0;
8655 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8656
8657 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King463fc692007-05-07 17:09:05 -05008658 if (ioa_cfg->needs_warm_reset)
8659 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8660 else
8661 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8662 IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8664 return PCI_ERS_RESULT_RECOVERED;
8665}
8666
8667/**
8668 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8669 * @pdev: PCI device struct
8670 *
8671 * Description: This routine is called when the PCI bus has
8672 * permanently failed.
8673 */
8674static void ipr_pci_perm_failure(struct pci_dev *pdev)
8675{
8676 unsigned long flags = 0;
8677 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008678 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008679
8680 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8681 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8682 ioa_cfg->sdt_state = ABORT_DUMP;
8683 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8684 ioa_cfg->in_ioa_bringdown = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008685 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8686 spin_lock(&ioa_cfg->hrrq[i]._lock);
8687 ioa_cfg->hrrq[i].allow_cmds = 0;
8688 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8689 }
8690 wmb();
Linas Vepstasf8a88b192006-02-03 16:52:42 -06008691 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8692 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8693}
8694
8695/**
8696 * ipr_pci_error_detected - Called when a PCI error is detected.
8697 * @pdev: PCI device struct
8698 * @state: PCI channel state
8699 *
8700 * Description: Called when a PCI error is detected.
8701 *
8702 * Return value:
8703 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8704 */
8705static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8706 pci_channel_state_t state)
8707{
8708 switch (state) {
8709 case pci_channel_io_frozen:
8710 ipr_pci_frozen(pdev);
8711 return PCI_ERS_RESULT_NEED_RESET;
8712 case pci_channel_io_perm_failure:
8713 ipr_pci_perm_failure(pdev);
8714 return PCI_ERS_RESULT_DISCONNECT;
8716 default:
8717 break;
8718 }
8719 return PCI_ERS_RESULT_NEED_RESET;
8720}
8721
8722/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008723 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8724 * @ioa_cfg: ioa cfg struct
8725 *
8726 * Description: This is the second phase of adapter initialization
8727 * This function takes care of initializing the adapter to the point
8728 * where it can accept new commands.
8729 *
8730 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02008731 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07008732 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008733static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008734{
8735 int rc = 0;
8736 unsigned long host_lock_flags = 0;
8737
8738 ENTER;
8739 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8740 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008741 if (ioa_cfg->needs_hard_reset) {
8742 ioa_cfg->needs_hard_reset = 0;
8743 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8744 } else
8745 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8746 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008747 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8748 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8749 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8750
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008751 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008752 rc = -EIO;
8753 } else if (ipr_invalid_adapter(ioa_cfg)) {
8754 if (!ipr_testmode)
8755 rc = -EIO;
8756
8757 dev_err(&ioa_cfg->pdev->dev,
8758 "Adapter not supported in this hardware configuration.\n");
8759 }
8760
8761 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8762
8763 LEAVE;
8764 return rc;
8765}
8766
8767/**
8768 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8769 * @ioa_cfg: ioa config struct
8770 *
8771 * Return value:
8772 * none
8773 **/
8774static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8775{
8776 int i;
8777
8778 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8779 if (ioa_cfg->ipr_cmnd_list[i])
8780 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8781 ioa_cfg->ipr_cmnd_list[i],
8782 ioa_cfg->ipr_cmnd_list_dma[i]);
8783
8784 ioa_cfg->ipr_cmnd_list[i] = NULL;
8785 }
8786
8787 if (ioa_cfg->ipr_cmd_pool)
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008788 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008789
Brian King89aad422012-03-14 21:20:10 -05008790 kfree(ioa_cfg->ipr_cmnd_list);
8791 kfree(ioa_cfg->ipr_cmnd_list_dma);
8792 ioa_cfg->ipr_cmnd_list = NULL;
8793 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008794 ioa_cfg->ipr_cmd_pool = NULL;
8795}
8796
8797/**
8798 * ipr_free_mem - Frees memory allocated for an adapter
8799 * @ioa_cfg: ioa cfg struct
8800 *
8801 * Return value:
8802 * nothing
8803 **/
8804static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8805{
8806 int i;
8807
8808 kfree(ioa_cfg->res_entries);
8809 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8810 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8811 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008812
8813 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8814 pci_free_consistent(ioa_cfg->pdev,
8815 sizeof(u32) * ioa_cfg->hrrq[i].size,
8816 ioa_cfg->hrrq[i].host_rrq,
8817 ioa_cfg->hrrq[i].host_rrq_dma);
8818
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008819 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8820 ioa_cfg->u.cfg_table,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008821 ioa_cfg->cfg_table_dma);
8822
8823 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8824 pci_free_consistent(ioa_cfg->pdev,
8825 sizeof(struct ipr_hostrcb),
8826 ioa_cfg->hostrcb[i],
8827 ioa_cfg->hostrcb_dma[i]);
8828 }
8829
8830 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008831 kfree(ioa_cfg->trace);
8832}
8833
8834/**
8835 * ipr_free_all_resources - Free all allocated resources for an adapter.
8836 * @ioa_cfg: ioa config struct
8837 *
8838 * This function frees all allocated resources for the
8839 * specified adapter.
8840 *
8841 * Return value:
8842 * none
8843 **/
8844static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8845{
8846 struct pci_dev *pdev = ioa_cfg->pdev;
8847
8848 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008849 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8850 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8851 int i;
8852 for (i = 0; i < ioa_cfg->nvectors; i++)
8853 free_irq(ioa_cfg->vectors_info[i].vec,
8854 &ioa_cfg->hrrq[i]);
8855 } else
8856 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8857
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008858 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008859 pci_disable_msi(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008860 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8861 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008862 pci_disable_msix(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008863 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8864 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008865
Linus Torvalds1da177e2005-04-16 15:20:36 -07008866 iounmap(ioa_cfg->hdw_dma_regs);
8867 pci_release_regions(pdev);
8868 ipr_free_mem(ioa_cfg);
8869 scsi_host_put(ioa_cfg->host);
8870 pci_disable_device(pdev);
8871 LEAVE;
8872}
8873
8874/**
8875 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8876 * @ioa_cfg: ioa config struct
8877 *
8878 * Return value:
8879 * 0 on success / -ENOMEM on allocation failure
8880 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008881static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008882{
8883 struct ipr_cmnd *ipr_cmd;
8884 struct ipr_ioarcb *ioarcb;
8885 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008886 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008887
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008888 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8889 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008890
8891 if (!ioa_cfg->ipr_cmd_pool)
8892 return -ENOMEM;
8893
Brian King89aad422012-03-14 21:20:10 -05008894 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8895 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8896
8897 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8898 ipr_free_cmd_blks(ioa_cfg);
8899 return -ENOMEM;
8900 }
8901
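	/* Partition the command blocks among the HRRQs: with multiple HRRQs,
	 * queue 0 is reserved for internal (adapter) commands and the rest of
	 * the blocks are split evenly across the remaining queues; any
	 * leftover blocks are added to the last queue further down.
	 */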
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008902 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8903 if (ioa_cfg->hrrq_num > 1) {
8904 if (i == 0) {
8905 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8906 ioa_cfg->hrrq[i].min_cmd_id = 0;
8907 ioa_cfg->hrrq[i].max_cmd_id =
8908 (entries_each_hrrq - 1);
8909 } else {
8910 entries_each_hrrq =
8911 IPR_NUM_BASE_CMD_BLKS/
8912 (ioa_cfg->hrrq_num - 1);
8913 ioa_cfg->hrrq[i].min_cmd_id =
8914 IPR_NUM_INTERNAL_CMD_BLKS +
8915 (i - 1) * entries_each_hrrq;
8916 ioa_cfg->hrrq[i].max_cmd_id =
8917 (IPR_NUM_INTERNAL_CMD_BLKS +
8918 i * entries_each_hrrq - 1);
8919 }
8920 } else {
8921 entries_each_hrrq = IPR_NUM_CMD_BLKS;
8922 ioa_cfg->hrrq[i].min_cmd_id = 0;
8923 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8924 }
8925 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8926 }
8927
8928 BUG_ON(ioa_cfg->hrrq_num == 0);
8929
8930 i = IPR_NUM_CMD_BLKS -
8931 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8932 if (i > 0) {
8933 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8934 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8935 }
8936
Linus Torvalds1da177e2005-04-16 15:20:36 -07008937 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03008938 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008939
8940 if (!ipr_cmd) {
8941 ipr_free_cmd_blks(ioa_cfg);
8942 return -ENOMEM;
8943 }
8944
8945 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8946 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8947 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8948
8949 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08008950 ipr_cmd->dma_addr = dma_addr;
8951 if (ioa_cfg->sis64)
8952 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8953 else
8954 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8955
Linus Torvalds1da177e2005-04-16 15:20:36 -07008956 ioarcb->host_response_handle = cpu_to_be32(i << 2);
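		/* Give the adapter the bus addresses of this command's IOADL
		 * and IOASA, using 64-bit fields on SIS-64 adapters and
		 * 32-bit fields otherwise.
		 */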
Wayne Boyera32c0552010-02-19 13:23:36 -08008957 if (ioa_cfg->sis64) {
8958 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8959 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8960 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07008961 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08008962 } else {
8963 ioarcb->write_ioadl_addr =
8964 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8965 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8966 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07008967 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08008968 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008969 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8970 ipr_cmd->cmd_index = i;
8971 ipr_cmd->ioa_cfg = ioa_cfg;
8972 ipr_cmd->sense_buffer_dma = dma_addr +
8973 offsetof(struct ipr_cmnd, sense_buffer);
8974
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008975 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
8976 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8977 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8978 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
8979 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008980 }
8981
8982 return 0;
8983}
8984
8985/**
8986 * ipr_alloc_mem - Allocate memory for an adapter
8987 * @ioa_cfg: ioa config struct
8988 *
8989 * Return value:
8990 * 0 on success / non-zero for error
8991 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008992static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008993{
8994 struct pci_dev *pdev = ioa_cfg->pdev;
8995 int i, rc = -ENOMEM;
8996
8997 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06008998 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008999 ioa_cfg->max_devs_supported, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009000
9001 if (!ioa_cfg->res_entries)
9002 goto out;
9003
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009004 if (ioa_cfg->sis64) {
9005 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
9006 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
9007 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
9008 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
9009 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
9010 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
Brian Kinga2e49cb2013-01-11 17:43:48 -06009011
9012 if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
9013 || !ioa_cfg->vset_ids)
9014 goto out_free_res_entries;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009015 }
9016
9017 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009018 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009019 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9020 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009021
9022 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9023 sizeof(struct ipr_misc_cbs),
9024 &ioa_cfg->vpd_cbs_dma);
9025
9026 if (!ioa_cfg->vpd_cbs)
9027 goto out_free_res_entries;
9028
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009029 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9030 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9031 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009032 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9033 if (i == 0)
9034 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9035 else
9036 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009037 }
9038
Linus Torvalds1da177e2005-04-16 15:20:36 -07009039 if (ipr_alloc_cmd_blks(ioa_cfg))
9040 goto out_free_vpd_cbs;
9041
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009042 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9043 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9044 sizeof(u32) * ioa_cfg->hrrq[i].size,
9045 &ioa_cfg->hrrq[i].host_rrq_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009046
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009047 if (!ioa_cfg->hrrq[i].host_rrq) {
9048 while (--i >= 0)
9049 pci_free_consistent(pdev,
9050 sizeof(u32) * ioa_cfg->hrrq[i].size,
9051 ioa_cfg->hrrq[i].host_rrq,
9052 ioa_cfg->hrrq[i].host_rrq_dma);
9053 goto out_ipr_free_cmd_blocks;
9054 }
9055 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9056 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009057
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009058 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9059 ioa_cfg->cfg_table_size,
9060 &ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009061
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009062 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009063 goto out_free_host_rrq;
9064
9065 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9066 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9067 sizeof(struct ipr_hostrcb),
9068 &ioa_cfg->hostrcb_dma[i]);
9069
9070 if (!ioa_cfg->hostrcb[i])
9071 goto out_free_hostrcb_dma;
9072
9073 ioa_cfg->hostrcb[i]->hostrcb_dma =
9074 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06009075 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009076 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9077 }
9078
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009079 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07009080 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9081
9082 if (!ioa_cfg->trace)
9083 goto out_free_hostrcb_dma;
9084
Linus Torvalds1da177e2005-04-16 15:20:36 -07009085 rc = 0;
9086out:
9087 LEAVE;
9088 return rc;
9089
9090out_free_hostrcb_dma:
9091 while (i-- > 0) {
9092 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9093 ioa_cfg->hostrcb[i],
9094 ioa_cfg->hostrcb_dma[i]);
9095 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009096 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9097 ioa_cfg->u.cfg_table,
9098 ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009099out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009100 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9101 pci_free_consistent(pdev,
9102 sizeof(u32) * ioa_cfg->hrrq[i].size,
9103 ioa_cfg->hrrq[i].host_rrq,
9104 ioa_cfg->hrrq[i].host_rrq_dma);
9105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009106out_ipr_free_cmd_blocks:
9107 ipr_free_cmd_blks(ioa_cfg);
9108out_free_vpd_cbs:
9109 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9110 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9111out_free_res_entries:
9112 kfree(ioa_cfg->res_entries);
Brian Kinga2e49cb2013-01-11 17:43:48 -06009113 kfree(ioa_cfg->target_ids);
9114 kfree(ioa_cfg->array_ids);
9115 kfree(ioa_cfg->vset_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009116 goto out;
9117}
9118
9119/**
9120 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9121 * @ioa_cfg: ioa config struct
9122 *
9123 * Return value:
9124 * none
9125 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009126static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009127{
9128 int i;
9129
9130 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9131 ioa_cfg->bus_attr[i].bus = i;
9132 ioa_cfg->bus_attr[i].qas_enabled = 0;
9133 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9134 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9135 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9136 else
9137 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9138 }
9139}
9140
9141/**
9142 * ipr_init_ioa_cfg - Initialize IOA config struct
9143 * @ioa_cfg: ioa config struct
9144 * @host: scsi host struct
9145 * @pdev: PCI dev struct
9146 *
9147 * Return value:
9148 * none
9149 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009150static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9151 struct Scsi_Host *host, struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009152{
9153 const struct ipr_interrupt_offsets *p;
9154 struct ipr_interrupts *t;
9155 void __iomem *base;
9156
9157 ioa_cfg->host = host;
9158 ioa_cfg->pdev = pdev;
9159 ioa_cfg->log_level = ipr_log_level;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06009160 ioa_cfg->doorbell = IPR_DOORBELL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009161 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9162 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009163 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9164 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9165 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9166 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9167
Linus Torvalds1da177e2005-04-16 15:20:36 -07009168 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9169 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9170 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9171 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
David Howellsc4028952006-11-22 14:57:56 +00009172 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009173 init_waitqueue_head(&ioa_cfg->reset_wait_q);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009174 init_waitqueue_head(&ioa_cfg->msi_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009175 ioa_cfg->sdt_state = INACTIVE;
9176
9177 ipr_initialize_bus_attr(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009178 ioa_cfg->max_devs_supported = ipr_max_devs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009179
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009180 if (ioa_cfg->sis64) {
9181 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9182 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9183 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9184 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9185 } else {
9186 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9187 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9188 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9189 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9190 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009191 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9192 host->unique_id = host->host_no;
9193 host->max_cmd_len = IPR_MAX_CDB_LEN;
Brian King89aad422012-03-14 21:20:10 -05009194 host->can_queue = ioa_cfg->max_cmds;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009195 pci_set_drvdata(pdev, ioa_cfg);
9196
9197 p = &ioa_cfg->chip_cfg->regs;
9198 t = &ioa_cfg->regs;
9199 base = ioa_cfg->hdw_dma_regs;
9200
9201 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9202 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009203 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009204 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009205 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009206 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009207 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009208 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009209 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009210 t->ioarrin_reg = base + p->ioarrin_reg;
9211 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009212 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009213 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009214 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009215 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009216 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009217
9218 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009219 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009220 t->dump_addr_reg = base + p->dump_addr_reg;
9221 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009222 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009223 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009224}
9225
9226/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009227 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009228 * @dev_id: PCI device id struct
9229 *
9230 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009231 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009232 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009233static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009234ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009235{
9236 int i;
9237
Linus Torvalds1da177e2005-04-16 15:20:36 -07009238 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9239 if (ipr_chip[i].vendor == dev_id->vendor &&
9240 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009241 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07009242 return NULL;
9243}
9244
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009245static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9246{
9247 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9248 int i, err, vectors;
9249
9250 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9251 entries[i].entry = i;
9252
9253 vectors = ipr_number_of_msix;
9254
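	/* pci_enable_msix() returns a positive count when fewer vectors are
	 * available than requested; retry with the number it reports.
	 */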
9255 while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
9256 vectors = err;
9257
9258 if (err < 0) {
9259 pci_disable_msix(ioa_cfg->pdev);
9260 return err;
9261 }
9262
9263 if (!err) {
9264 for (i = 0; i < vectors; i++)
9265 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9266 ioa_cfg->nvectors = vectors;
9267 }
9268
9269 return err;
9270}
9271
9272static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9273{
9274 int i, err, vectors;
9275
9276 vectors = ipr_number_of_msix;
9277
9278 while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
9279 vectors = err;
9280
9281 if (err < 0) {
9282 pci_disable_msi(ioa_cfg->pdev);
9283 return err;
9284 }
9285
9286 if (!err) {
9287 for (i = 0; i < vectors; i++)
9288 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9289 ioa_cfg->nvectors = vectors;
9290 }
9291
9292 return err;
9293}
9294
9295static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9296{
9297 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9298
9299 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9300 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9301 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9302 ioa_cfg->vectors_info[vec_idx].
9303 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9304 }
9305}
9306
9307static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9308{
9309 int i, rc;
9310
9311 for (i = 1; i < ioa_cfg->nvectors; i++) {
9312 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9313 ipr_isr_mhrrq,
9314 0,
9315 ioa_cfg->vectors_info[i].desc,
9316 &ioa_cfg->hrrq[i]);
9317 if (rc) {
9318 while (--i >= 0)
9319 free_irq(ioa_cfg->vectors_info[i].vec,
9320 &ioa_cfg->hrrq[i]);
9321 return rc;
9322 }
9323 }
9324 return 0;
9325}
9326
Linus Torvalds1da177e2005-04-16 15:20:36 -07009327/**
Wayne Boyer95fecd92009-06-16 15:13:28 -07009328 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9329 * @irq: interrupt number
 * @devp: pointer to the ioa config struct
9330 *
9331 * Description: Simply set the msi_received flag to 1 indicating that
9332 * Message Signaled Interrupts are supported.
9333 *
9334 * Return value:
9335 * IRQ_HANDLED
9336 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009337static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -07009338{
9339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9340 unsigned long lock_flags = 0;
9341 irqreturn_t rc = IRQ_HANDLED;
9342
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009343 dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009344 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9345
9346 ioa_cfg->msi_received = 1;
9347 wake_up(&ioa_cfg->msi_wait_q);
9348
9349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9350 return rc;
9351}
9352
9353/**
9354 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9355 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
9356 *
9357 * Description: The return value from pci_enable_msi() can not always be
9358 * trusted. This routine sets up and initiates a test interrupt to determine
9359 * if the interrupt is received via the ipr_test_intr() service routine.
9360 * If the tests fails, the driver will fall back to LSI.
9361 *
9362 * Return value:
9363 * 0 on success / non-zero on failure
9364 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009365static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -07009366{
9367 int rc;
9368 volatile u32 int_reg;
9369 unsigned long lock_flags = 0;
9370
9371 ENTER;
9372
9373 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9374 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9375 ioa_cfg->msi_received = 0;
9376 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -08009377 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009378 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9379 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9380
9381 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9382 if (rc) {
9383 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9384 return rc;
9385 } else if (ipr_debug)
9386 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9387
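	/* Fire a test interrupt by raising the IO debug acknowledge bit and
	 * give ipr_test_intr() up to a second to observe it.
	 */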
Wayne Boyer214777b2010-02-19 13:24:26 -08009388 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009389 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9390 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009391 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009392 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9393
Wayne Boyer95fecd92009-06-16 15:13:28 -07009394 if (!ioa_cfg->msi_received) {
9395 /* MSI test failed */
9396 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9397 rc = -EOPNOTSUPP;
9398 } else if (ipr_debug)
9399 dev_info(&pdev->dev, "MSI test succeeded.\n");
9400
9401 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9402
9403 free_irq(pdev->irq, ioa_cfg);
9404
9405 LEAVE;
9406
9407 return rc;
9408}
9409
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009410/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -07009411 * @pdev: PCI device struct
9412 * @dev_id: PCI device id struct
9413 *
9414 * Return value:
9415 * 0 on success / non-zero on failure
9416 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009417static int ipr_probe_ioa(struct pci_dev *pdev,
9418 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009419{
9420 struct ipr_ioa_cfg *ioa_cfg;
9421 struct Scsi_Host *host;
9422 unsigned long ipr_regs_pci;
9423 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -07009424 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -05009425 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009426 unsigned long lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009427
9428 ENTER;
9429
9430 if ((rc = pci_enable_device(pdev))) {
9431 dev_err(&pdev->dev, "Cannot enable adapter\n");
9432 goto out;
9433 }
9434
9435 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9436
9437 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9438
9439 if (!host) {
9440 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9441 rc = -ENOMEM;
9442 goto out_disable;
9443 }
9444
9445 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9446 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d132012-07-09 21:06:08 -07009447 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009448
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009449 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009450
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009451 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009452 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9453 dev_id->vendor, dev_id->device);
9454 goto out_scsi_host_put;
9455 }
9456
Wayne Boyera32c0552010-02-19 13:23:36 -08009457 /* set SIS 32 or SIS 64 */
9458 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009459 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -05009460 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -05009461 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009462
Brian King5469cb52007-03-29 12:42:40 -05009463 if (ipr_transop_timeout)
9464 ioa_cfg->transop_timeout = ipr_transop_timeout;
9465 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9466 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9467 else
9468 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9469
Auke Kok44c10132007-06-08 15:46:36 -07009470 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -05009471
Linus Torvalds1da177e2005-04-16 15:20:36 -07009472 ipr_regs_pci = pci_resource_start(pdev, 0);
9473
9474 rc = pci_request_regions(pdev, IPR_NAME);
9475 if (rc < 0) {
9476 dev_err(&pdev->dev,
9477 "Couldn't register memory range of registers\n");
9478 goto out_scsi_host_put;
9479 }
9480
Arjan van de Ven25729a72008-09-28 16:18:02 -07009481 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009482
9483 if (!ipr_regs) {
9484 dev_err(&pdev->dev,
9485 "Couldn't map memory range of registers\n");
9486 rc = -ENOMEM;
9487 goto out_release_regions;
9488 }
9489
9490 ioa_cfg->hdw_dma_regs = ipr_regs;
9491 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9492 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9493
9494 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9495
9496 pci_set_master(pdev);
9497
Wayne Boyera32c0552010-02-19 13:23:36 -08009498 if (ioa_cfg->sis64) {
9499 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9500 if (rc < 0) {
9501 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9502 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9503 }
9504
9505 } else
9506 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9507
Linus Torvalds1da177e2005-04-16 15:20:36 -07009508 if (rc < 0) {
9509 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9510 goto cleanup_nomem;
9511 }
9512
9513 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9514 ioa_cfg->chip_cfg->cache_line_size);
9515
9516 if (rc != PCIBIOS_SUCCESSFUL) {
9517 dev_err(&pdev->dev, "Write of cache line size failed\n");
9518 rc = -EIO;
9519 goto cleanup_nomem;
9520 }
9521
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009522 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9523 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9524 IPR_MAX_MSIX_VECTORS);
9525 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9526 }
9527
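	/* Prefer MSI-X, then MSI; fall back to a single legacy (LSI)
	 * interrupt if neither can be enabled.
	 */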
9528 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009529 ipr_enable_msix(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009530 ioa_cfg->intr_flag = IPR_USE_MSIX;
9531 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009532 ipr_enable_msi(ioa_cfg) == 0)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009533 ioa_cfg->intr_flag = IPR_USE_MSI;
9534 else {
9535 ioa_cfg->intr_flag = IPR_USE_LSI;
9536 ioa_cfg->nvectors = 1;
9537 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9538 }
9539
9540 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9541 ioa_cfg->intr_flag == IPR_USE_MSIX) {
Wayne Boyer95fecd92009-06-16 15:13:28 -07009542 rc = ipr_test_msi(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009543 if (rc == -EOPNOTSUPP) {
9544 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9545 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9546 pci_disable_msi(pdev);
9547 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9548 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9549 pci_disable_msix(pdev);
9550 }
9551
9552 ioa_cfg->intr_flag = IPR_USE_LSI;
9553 ioa_cfg->nvectors = 1;
9554 }
Wayne Boyer95fecd92009-06-16 15:13:28 -07009555 else if (rc)
9556 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009557 else {
9558 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9559 dev_info(&pdev->dev,
9560 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9561 ioa_cfg->nvectors, pdev->irq);
9562 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9563 dev_info(&pdev->dev,
9564 "Request for %d MSIXs succeeded.",
9565 ioa_cfg->nvectors);
9566 }
9567 }
9568
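	/* Use one HRRQ per enabled interrupt vector, capped by the number of
	 * online CPUs and the driver maximum.
	 */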
9569 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9570 (unsigned int)num_online_cpus(),
9571 (unsigned int)IPR_MAX_HRRQ_NUM);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009572
Linus Torvalds1da177e2005-04-16 15:20:36 -07009573 /* Save away PCI config space for use following IOA reset */
9574 rc = pci_save_state(pdev);
9575
9576 if (rc != PCIBIOS_SUCCESSFUL) {
9577 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9578 rc = -EIO;
Julia Lawallf170c682011-07-11 14:08:25 -07009579 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009580 }
9581
9582 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -07009583 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009584
9585 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -07009586 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009587
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009588 if (ioa_cfg->sis64)
9589 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9590 + ((sizeof(struct ipr_config_table_entry64)
9591 * ioa_cfg->max_devs_supported)));
9592 else
9593 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9594 + ((sizeof(struct ipr_config_table_entry)
9595 * ioa_cfg->max_devs_supported)));
9596
Linus Torvalds1da177e2005-04-16 15:20:36 -07009597 rc = ipr_alloc_mem(ioa_cfg);
9598 if (rc < 0) {
9599 dev_err(&pdev->dev,
9600 "Couldn't allocate enough memory for device driver!\n");
Julia Lawallf170c682011-07-11 14:08:25 -07009601 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009602 }
9603
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009604 /*
9605 * If HRRQ updated interrupt is not masked, or reset alert is set,
9606 * the card is in an unknown state and needs a hard reset
9607 */
Wayne Boyer214777b2010-02-19 13:24:26 -08009608 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9609 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9610 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009611 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9612 ioa_cfg->needs_hard_reset = 1;
Anton Blanchard5d7c20b2011-08-01 19:43:45 +10009613 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
Brian King473b1e82007-05-02 10:44:11 -05009614 ioa_cfg->needs_hard_reset = 1;
9615 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9616 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009617
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009618 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009619 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009621
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009622 if (ioa_cfg->intr_flag == IPR_USE_MSI
9623 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9624 name_msi_vectors(ioa_cfg);
9625 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9626 0,
9627 ioa_cfg->vectors_info[0].desc,
9628 &ioa_cfg->hrrq[0]);
9629 if (!rc)
9630 rc = ipr_request_other_msi_irqs(ioa_cfg);
9631 } else {
9632 rc = request_irq(pdev->irq, ipr_isr,
9633 IRQF_SHARED,
9634 IPR_NAME, &ioa_cfg->hrrq[0]);
9635 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009636 if (rc) {
9637 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9638 pdev->irq, rc);
9639 goto cleanup_nolog;
9640 }
9641
Brian King463fc692007-05-07 17:09:05 -05009642 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9643 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9644 ioa_cfg->needs_warm_reset = 1;
9645 ioa_cfg->reset = ipr_reset_slot_reset;
9646 } else
9647 ioa_cfg->reset = ipr_reset_start_bist;
9648
Linus Torvalds1da177e2005-04-16 15:20:36 -07009649 spin_lock(&ipr_driver_lock);
9650 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9651 spin_unlock(&ipr_driver_lock);
9652
9653 LEAVE;
9654out:
9655 return rc;
9656
9657cleanup_nolog:
9658 ipr_free_mem(ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -07009659out_msi_disable:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009660 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9661 pci_disable_msi(pdev);
9662 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9663 pci_disable_msix(pdev);
Julia Lawallf170c682011-07-11 14:08:25 -07009664cleanup_nomem:
9665 iounmap(ipr_regs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009666out_release_regions:
9667 pci_release_regions(pdev);
9668out_scsi_host_put:
9669 scsi_host_put(host);
9670out_disable:
9671 pci_disable_device(pdev);
9672 goto out;
9673}
9674
9675/**
9676 * ipr_scan_vsets - Scans for VSET devices
9677 * @ioa_cfg: ioa config struct
9678 *
9679 * Description: Since the VSET resources do not follow SAM in that we can have
9680 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9681 *
9682 * Return value:
9683 * none
9684 **/
9685static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9686{
9687 int target, lun;
9688
9689 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009690 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009691 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9692}
9693
9694/**
9695 * ipr_initiate_ioa_bringdown - Bring down an adapter
9696 * @ioa_cfg: ioa config struct
9697 * @shutdown_type: shutdown type
9698 *
9699 * Description: This function will initiate bringing down the adapter.
9700 * This consists of issuing an IOA shutdown to the adapter
9701 * to flush the cache, and running BIST.
9702 * If the caller needs to wait on the completion of the reset,
9703 * the caller must sleep on the reset_wait_q.
9704 *
9705 * Return value:
9706 * none
9707 **/
9708static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9709 enum ipr_shutdown_type shutdown_type)
9710{
9711 ENTER;
9712 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9713 ioa_cfg->sdt_state = ABORT_DUMP;
9714 ioa_cfg->reset_retries = 0;
9715 ioa_cfg->in_ioa_bringdown = 1;
9716 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9717 LEAVE;
9718}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
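/*
 * Removal ordering: wait out any reset/reload in progress, start a normal
 * shutdown, wait for it to finish, flush the adapter work queue, unlink the
 * adapter from the global ipr_ioa_head list, and only then free all
 * resources.
 */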

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
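/*
 * The sysfs trace and dump attribute files are removed first, then the SCSI
 * host itself; the remaining adapter teardown is delegated to __ipr_remove().
 */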

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}
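/*
 * Probe summary: bring up the adapter (ipr_probe_ioa/_part2), register the
 * SCSI host and its sysfs trace/dump files, scan for devices including the
 * sparse VSET LUNs and the IOA resource itself, and, on SIS-64 adapters with
 * multiple interrupt vectors, enable blk_iopoll on the secondary HRR queues
 * before kicking the adapter work queue.
 */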

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

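/*
 * Each entry below is { vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data }.  class and class_mask are 0 (match any), and
 * driver_data carries per-adapter flags such as IPR_USE_LONG_TRANSOP_TIMEOUT
 * and IPR_USE_PCI_WARM_RESET that the probe path consults through
 * dev_id->driver_data (see the warm-reset selection above).
 */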
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009894static struct pci_device_id ipr_pci_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009895 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009896 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009897 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009898 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009899 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009900 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009901 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -06009902 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009903 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -06009904 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009905 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -06009906 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009907 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -06009908 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009909 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King5469cb52007-03-29 12:42:40 -05009910 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9911 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009912 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -06009913 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009914 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -05009915 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9916 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -06009917 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -05009918 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9919 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009920 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -06009921 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009922 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -05009923 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9924 IPR_USE_LONG_TRANSOP_TIMEOUT},
Brian King60e74862006-11-21 10:28:10 -06009925 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -05009926 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9927 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -06009928 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King22d2e402007-04-26 16:00:13 -05009929 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9930 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King185eb312007-03-29 12:42:53 -05009931 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King185eb312007-03-29 12:42:53 -05009932 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9933 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Wayne Boyerb0f56d32010-06-24 13:34:14 -07009934 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9935 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King5469cb52007-03-29 12:42:40 -05009936 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
Brian King463fc692007-05-07 17:09:05 -05009937 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009938 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
Brian King6d84c942007-01-23 11:25:23 -06009939 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009940 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King6d84c942007-01-23 11:25:23 -06009941 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -06009942 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -05009943 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9944 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -06009945 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -05009946 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9947 IPR_USE_LONG_TRANSOP_TIMEOUT },
Wayne Boyerd7b46272010-02-19 13:24:38 -08009948 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9949 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9950 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9951 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9952 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9953 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
Wayne Boyer32622bd2010-10-18 20:24:34 -07009954 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -06009955 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
9956 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer5a918352011-10-27 11:58:21 -07009957 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
9958 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer32622bd2010-10-18 20:24:34 -07009959 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -08009960 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -08009961 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -08009962 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -08009963 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -08009964 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -08009965 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -08009966 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9967 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
9968 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -08009969 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -06009970 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9971 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
9972 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9973 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
9974 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9975 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
9976 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9977 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07009978 { }
9979};
9980MODULE_DEVICE_TABLE(pci, ipr_pci_table);
9981
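/*
 * PCI error handling (for example EEH recovery on Power platforms) is limited
 * to the two callbacks below: ipr_pci_error_detected is called when an error
 * is detected on the bus, and ipr_pci_slot_reset after the slot has been
 * reset, so the driver can restart the adapter.
 */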
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
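/*
 * Nothing is inspected on completion of the shutdown-prepare command;
 * ipr_halt_done() only returns the command block to its HRR queue's free
 * list.
 */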

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system state change event
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}

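/*
 * Positional initializer: .notifier_call = ipr_halt, .next = NULL,
 * .priority = 0, so the reboot notifier runs at the default priority.
 */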
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);
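/*
 * Module load registers the reboot notifier and the PCI driver; unload
 * reverses both.  A minimal sketch of exercising the driver, assuming a
 * standard modular build (command names shown are illustrative, not part of
 * this file):
 *
 *   # modprobe ipr
 *   # lsscsi           - list the adapters and attached disks
 *   # modprobe -r ipr
 */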