/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.20-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will retry a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
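
/*
 * Example: both parameters can be set at module load time, e.g.
 * "modprobe hpsa hpsa_simple_mode=1", and, being marked S_IWUSR, can
 * also be changed later by root via /sys/module/hpsa/parameters/.
 */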

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
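
/*
 * Note: the final 0xFFFF103C "Unknown Smart Array" entry is the catch-all
 * used for boards matched only by the PCI_CLASS_STORAGE_RAID wildcard IDs
 * above; see the hpsa_allow_any module parameter.
 */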

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

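/*
 * Sentinel values stored in CommandList->scsi_cmd: these mark commands
 * that are driver-internal (SCSI_CMD_BUSY) or not currently owned by any
 * request (SCSI_CMD_IDLE), as opposed to holding a real midlayer
 * scsi_cmnd pointer (see hpsa_is_cmd_idle() below).
 */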
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
	struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
	bool *legacy_board);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[],
	int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->reset_pending;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

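/*
 * Returns 1 if the command's sense data indicates a UNIT ATTENTION
 * condition (logged with its cause below), 0 otherwise.
 */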
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

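/*
 * Writing anything to the host's "rescan" sysfs attribute kicks off a
 * scan for new or changed devices, e.g. (illustrative host number):
 *	echo 1 > /sys/class/scsi_host/host0/rescan
 */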
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

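/*
 * CISS addressing: bits 7:6 of byte 3 of the 8-byte LUN address give the
 * addressing mode; 01b (0x40 after the 0xC0 mask) means logical-device
 * addressing, i.e. a logical volume rather than a bare physical device.
 */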
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t sas_address_show(struct device *dev,
	      struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

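/*
 * path_info_show() emits one line per configured path, e.g.
 * (illustrative):
 *	[1:0:2:0]        Direct-Access PORT: 1I BOX: 1 BAY: 3 Active
 * RAID and logical devices report only the Active/Inactive state.
 */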
#define MAX_PATHS 8
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}

static ssize_t host_show_ctlr_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
}

static ssize_t host_show_legacy_board(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
	host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static DEVICE_ATTR(ctlr_num, S_IRUGO,
	host_show_ctlr_num, NULL);
static DEVICE_ATTR(legacy_board, S_IRUGO,
	host_show_legacy_board, NULL);

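/*
 * The sdev attributes below appear per-device (e.g. under
 * /sys/class/scsi_device/<h:c:t:l>/device/), while the shost attributes
 * appear per-controller under /sys/class/scsi_host/host<N>/.
 */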
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	&dev_attr_ctlr_num,
	&dev_attr_legacy_board,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_DRIVER +\
				 HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 1024,
	.no_write_same = 1,
};

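/*
 * next_command() consumes the per-queue reply ring: the low bit of each
 * entry is a phase bit that the controller flips on every pass through
 * the ring, so an entry is "new" only while its low bit matches
 * rq->wraparound; when current_entry reaches max_commands we wrap to 0
 * and flip the expected phase.
 */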
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
/*
 * set_performant_mode: modify the tag for cciss performant mode:
 * set bit 0 for pull model, bits 3-1 for the block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

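/*
 * Submission dispatch: ioaccel1, ioaccel2, and ioaccel2 TMF commands are
 * posted to their dedicated inbound registers; everything else goes
 * through the standard access.submit_command() path with the
 * performant-mode tag bits applied.
 */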
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

Stephen M. Cameron3f5eac32011-03-09 17:00:01 -06001185static inline int is_hba_lunid(unsigned char scsi3addr[])
1186{
1187 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1188}
1189
1190static inline int is_scsi_rev_5(struct ctlr_info *h)
1191{
1192 if (!h->hba_inquiry_data)
1193 return 0;
1194 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1195 return 1;
1196 return 0;
1197}
1198
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting bytes 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in bytes 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * If the drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  If raid map data has changed,
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], and we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}

/* Replace an entry in h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

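/*
 * Bytewise equality test for two 8-byte SCSI3 LUN addresses.
 */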
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first.
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc., return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in *index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

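/*
 * Remember an offline volume so it can be polled later and brought
 * online once it becomes ready.  Duplicate entries are skipped; the
 * list is protected by h->offline_device_lock.
 */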
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return;

	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev[], int ndevices,
	struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK &&
			    dev[j]->devtype != TYPE_ZBC)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded.  In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present.  And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case.
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes.
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}

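/*
 * Re-derive the logical-to-physical disk mapping for each logical
 * drive after a device table update.  Drives with offload currently
 * enabled are skipped, since their RAID map must not be changing.
 */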
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK &&
		    dev[i]->devtype != TYPE_ZBC)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}

static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
			device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}

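/*
 * Count commands still outstanding against a device.  Each command
 * slot's refcount is bumped so the command can be examined safely;
 * a refcount above one means the slot is currently in use.
 */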
static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev)
{
	int i;
	int count = 0;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
			dev->scsi3addr)) {
			unsigned long flags;

			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				++count;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	return count;
}

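/*
 * Bounded wait for a device's outstanding commands to drain before
 * removal: poll once a second, giving up after 20 attempts (so roughly
 * 20 seconds in the worst case).
 */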
static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *device)
{
	int cmds = 0;
	int waits = 0;

	while (1) {
		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
		if (cmds == 0)
			break;
		if (++waits > 20)
			break;
		dev_warn(&h->pdev->dev,
			"%s: removing device with %d outstanding commands!\n",
			__func__, cmds);
		msleep(1000);
	}
}

static void hpsa_remove_device(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
			device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
				"didn't find device for removal.");
		}
	} else { /* HBA */

		device->removed = 1;
		hpsa_wait_for_outstanding_commands_for_dev(h, device);

		hpsa_remove_sas_device(device);
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;

	/*
	 * A reset can cause a device status to change;
	 * re-schedule the scan to see what happened.
	 */
	spin_lock_irqsave(&h->reset_lock, flags);
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		spin_unlock_irqrestore(&h->reset_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->reset_lock, flags);

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 */
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == NULL)
			continue;
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
	}

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later.  This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes); scsi_scan_host will do it later the
	 * first time through.
	 */
	if (!changes)
		goto free_and_out;

	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device)
			hpsa_remove_device(h, removed[i]);
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		int rc = 0;

		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		rc = hpsa_add_device(h, added[i]);
		if (!rc)
			continue;
		dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.", rc);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

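/*
 * Midlayer hook for each newly discovered scsi_device.  Devices on the
 * physical SAS bus are matched to their hpsa device via the SAS rphy;
 * everything else is looked up by bus/target/lun.
 */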
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd = NULL;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
		struct scsi_target *starget;
		struct sas_rphy *rphy;

		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		sd = hpsa_find_device_by_sas_rphy(h, rphy);
		if (sd) {
			sd->target = sdev_id(sdev);
			sd->lun = sdev->lun;
		}
	}
	if (!sd)
		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
					sdev_id(sdev), sdev->lun);

	if (sd && sd->expose_device) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !sd->expose_device;

	if (sd) {
		if (sd->external)
			queue_depth = EXTERNAL_QD;
		else
			queue_depth = sd->queue_depth != 0 ?
					sd->queue_depth : sdev->host->can_queue;
	} else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

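/*
 * ioaccel2 commands that need more SG entries than fit in the command
 * itself spill into a per-command chain block.  The arrays below are
 * allocated once per controller, one chain block per command slot,
 * each sized for h->maxsgentries elements.
 */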
static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->ioaccel2_cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
}

static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->ioaccel2_cmd_sg_list =
		kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
					GFP_KERNEL);
	if (!h->ioaccel2_cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->ioaccel2_cmd_sg_list[i] =
			kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
					h->maxsgentries, GFP_KERNEL);
		if (!h->ioaccel2_cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;

	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

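/*
 * DMA-map a command's SG chain block and plant its bus address in the
 * chain descriptor, so the controller can fetch the remaining SG
 * entries on its own.
 */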
static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp, struct CommandList *c)
{
	struct ioaccel2_sg_element *chain_block;
	u64 temp64;
	u32 chain_size;

	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
	chain_size = le32_to_cpu(cp->sg[0].length);
	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		cp->sg->address = 0;
		return -1;
	}
	cp->sg->address = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp)
{
	struct ioaccel2_sg_element *chain_sg;
	u64 temp64;
	u32 chain_size;

	chain_sg = cp->sg;
	temp64 = le64_to_cpu(chain_sg->address);
	chain_size = le32_to_cpu(cp->sg[0].length);
	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}

2266 * Return 1 for any error that should generate a RAID path retry.
2267 * Return 0 for errors that don't require a RAID path retry.
2268 */
2269static int handle_ioaccel_mode2_error(struct ctlr_info *h,
Scott Teelc3497752014-02-18 13:56:34 -06002270 struct CommandList *c,
2271 struct scsi_cmnd *cmd,
Don Braceba74fdc2016-04-27 17:14:17 -05002272 struct io_accel2_cmd *c2,
2273 struct hpsa_scsi_dev_t *dev)
Scott Teelc3497752014-02-18 13:56:34 -06002274{
2275 int data_len;
Scott Teela09c1442014-02-18 13:57:21 -06002276 int retry = 0;
Joe Handzikc40820d2015-04-23 09:33:32 -05002277 u32 ioaccel2_resid = 0;
Scott Teelc3497752014-02-18 13:56:34 -06002278
2279 switch (c2->error_data.serv_response) {
2280 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2281 switch (c2->error_data.status) {
2282 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2283 break;
2284 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05002285 cmd->result |= SAM_STAT_CHECK_CONDITION;
Scott Teelc3497752014-02-18 13:56:34 -06002286 if (c2->error_data.data_present !=
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05002287 IOACCEL2_SENSE_DATA_PRESENT) {
2288 memset(cmd->sense_buffer, 0,
2289 SCSI_SENSE_BUFFERSIZE);
Scott Teelc3497752014-02-18 13:56:34 -06002290 break;
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05002291 }
Scott Teelc3497752014-02-18 13:56:34 -06002292 /* copy the sense data */
2293 data_len = c2->error_data.sense_data_len;
2294 if (data_len > SCSI_SENSE_BUFFERSIZE)
2295 data_len = SCSI_SENSE_BUFFERSIZE;
2296 if (data_len > sizeof(c2->error_data.sense_data_buff))
2297 data_len =
2298 sizeof(c2->error_data.sense_data_buff);
2299 memcpy(cmd->sense_buffer,
2300 c2->error_data.sense_data_buff, data_len);
Scott Teela09c1442014-02-18 13:57:21 -06002301 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06002302 break;
2303 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
Scott Teela09c1442014-02-18 13:57:21 -06002304 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06002305 break;
2306 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
Scott Teela09c1442014-02-18 13:57:21 -06002307 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06002308 break;
2309 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
Stephen Cameron4a8da222015-04-23 09:32:43 -05002310 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06002311 break;
2312 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
Scott Teela09c1442014-02-18 13:57:21 -06002313 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06002314 break;
2315 default:
Scott Teela09c1442014-02-18 13:57:21 -06002316 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06002317 break;
2318 }
2319 break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_IO_ERROR:
		case IOACCEL2_STATUS_SR_IO_ABORTED:
		case IOACCEL2_STATUS_SR_OVERRUN:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_UNDERRUN:
			cmd->result = (DID_OK << 16);		/* host byte */
			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
			ioaccel2_resid = get_unaligned_le32(
						&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
			break;
		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/*
			 * Did an HBA disk disappear?  We will eventually
			 * get a state change event from the controller but
			 * in the meantime, we need to tell the OS that the
			 * HBA disk is no longer there and stop I/O
			 * from going down.  This allows the potential re-insert
			 * of the disk to get the same device node.
			 */
			if (dev->physical_device && dev->expose_device) {
				cmd->result = DID_NO_CONNECT << 16;
				dev->removed = 1;
				h->drv_req_rescan = 1;
				dev_warn(&h->pdev->dev,
					"%s: device is gone!\n", __func__);
			} else
				/*
				 * Retry by sending down the RAID path.
				 * We will get an event from ctlr to
				 * trigger rescan regardless.
				 */
				retry = 1;
			break;
		default:
			retry = 1;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		break;
	default:
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

static void hpsa_cmd_resolve_events(struct ctlr_info *h,
		struct CommandList *c)
{
	bool do_wake = false;

	/*
	 * Reset c->scsi_cmd here so that the reset handler will know
	 * this command has completed.  Then, check to see if the handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Declare command idle before checking for pending events. */
	if (c->reset_pending) {
		unsigned long flags;
		struct hpsa_scsi_dev_t *dev;

		/*
		 * There appears to be a reset pending; lock the lock and
		 * reconfirm.  If so, then decrement the count of outstanding
		 * commands and wake the reset command if this is the last one.
		 */
		spin_lock_irqsave(&h->lock, flags);
		dev = c->reset_pending;		/* Re-fetch under the lock. */
		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
			do_wake = true;
		c->reset_pending = NULL;
		spin_unlock_irqrestore(&h->lock, flags);
	}

	if (do_wake)
		wake_up_all(&h->event_sync_wait_queue);
}

static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
		struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	if (cmd && cmd->scsi_done)
		cmd->scsi_done(cmd);
}

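/*
 * Re-drive a failed ioaccel command down the normal RAID path from
 * process context.  The resubmit work is queued on the CPU handling
 * the completion, presumably to keep the command cache-warm.
 */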
static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0))
		return hpsa_cmd_free_and_done(h, c, cmd);

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_device(dev) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
			dev->offload_enabled = 0;
			dev->offload_to_be_enabled = 0;
		}

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}

/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
		struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
				tmf_status);
		break;
	}
	return -tmf_status;
}

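/*
 * Main completion path for CISS and ioaccel commands: undo the DMA
 * mappings, translate the controller's error information into SCSI
 * midlayer result codes, and hand the scsi_cmnd back to the midlayer.
 */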
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;		/* additional sense code */
	u8 ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;

	if (!cmd->device) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
		if (dev->physical_device && dev->expose_device &&
			dev->removed) {
			cmd->result = DID_NO_CONNECT << 16;
			return hpsa_cmd_free_and_done(h, cp, cmd);
		}
		if (likely(cp->phys_disk != NULL))
			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
	}

Webb Scales25163bd2015-04-23 09:32:00 -05002547 /*
2548 * We check for lockup status here as it may be set for
2549 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2550 * fail_all_oustanding_cmds()
2551 */
2552 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2553 /* DID_NO_CONNECT will prevent a retry */
2554 cmd->result = DID_NO_CONNECT << 16;
Webb Scales8a0ff922015-04-23 09:34:11 -05002555 return hpsa_cmd_free_and_done(h, cp, cmd);
Webb Scales25163bd2015-04-23 09:32:00 -05002556 }
2557
Don Brace08ec46f2017-05-04 17:51:49 -05002558 if ((unlikely(hpsa_is_pending_event(cp))))
Webb Scalesd604f532015-04-23 09:35:22 -05002559 if (cp->reset_pending)
Don Bracebfd75462016-11-15 14:45:32 -06002560 return hpsa_cmd_free_and_done(h, cp, cmd);
Webb Scalesd604f532015-04-23 09:35:22 -05002561
Scott Teelc3497752014-02-18 13:56:34 -06002562 if (cp->cmd_type == CMD_IOACCEL2)
2563 return process_ioaccel2_completion(h, cp, cmd, dev);
2564
Robert Elliott6aa4c362014-07-03 10:18:19 -05002565 scsi_set_resid(cmd, ei->ResidualCnt);
Webb Scales8a0ff922015-04-23 09:34:11 -05002566 if (ei->CommandStatus == 0)
2567 return hpsa_cmd_free_and_done(h, cp, cmd);
Robert Elliott6aa4c362014-07-03 10:18:19 -05002568
Matt Gatese1f7de02014-02-18 13:55:17 -06002569 /* For I/O accelerator commands, copy over some fields to the normal
2570 * CISS header used below for error handling.
2571 */
2572 if (cp->cmd_type == CMD_IOACCEL1) {
2573 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
Don Brace2b08b3e2015-01-23 16:41:09 -06002574 cp->Header.SGList = scsi_sg_count(cmd);
2575 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2576 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2577 IOACCEL1_IOFLAGS_CDBLEN_MASK;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002578 cp->Header.tag = c->tag;
Matt Gatese1f7de02014-02-18 13:55:17 -06002579 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2580 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002581
2582 /* Any RAID offload error results in retry which will use
2583 * the normal I/O path so the controller can handle whatever's
2584 * wrong.
2585 */
Kevin Barnettf3f01732015-11-04 15:51:33 -06002586 if (is_logical_device(dev)) {
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002587 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2588 dev->offload_enabled = 0;
Webb Scalesd604f532015-04-23 09:35:22 -05002589 return hpsa_retry_cmd(h, cp);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002590 }
Matt Gatese1f7de02014-02-18 13:55:17 -06002591 }
2592
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002593 /* an error has occurred */
2594 switch (ei->CommandStatus) {
2595
2596 case CMD_TARGET_STATUS:
Stephen Cameron9437ac42015-04-23 09:32:16 -05002597 cmd->result |= ei->ScsiStatus;
2598 /* copy the sense data */
2599 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2600 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2601 else
2602 sense_data_size = sizeof(ei->SenseInfo);
2603 if (ei->SenseLen < sense_data_size)
2604 sense_data_size = ei->SenseLen;
2605 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2606 if (ei->ScsiStatus)
2607 decode_sense_data(ei->SenseInfo, sense_data_size,
2608 &sense_key, &asc, &ascq);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002609 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
Matt Gates1d3b3602010-02-04 08:43:00 -06002610 if (sense_key == ABORTED_COMMAND) {
Stephen M. Cameron2e311fb2013-09-23 13:33:41 -05002611 cmd->result |= DID_SOFT_ERROR << 16;
Matt Gates1d3b3602010-02-04 08:43:00 -06002612 break;
2613 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002614 break;
2615 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002616 /* Problem was not a check condition
2617 * Pass it up to the upper layers...
2618 */
2619 if (ei->ScsiStatus) {
2620 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2621 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2622 "Returning result: 0x%x\n",
2623 cp, ei->ScsiStatus,
2624 sense_key, asc, ascq,
2625 cmd->result);
2626 } else { /* scsi status is zero??? How??? */
2627 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2628 "Returning no connection.\n", cp),
2629
2630 /* Ordinarily, this case should never happen,
2631 * but there is a bug in some released firmware
2632 * revisions that allows it to happen if, for
2633 * example, a 4100 backplane loses power and
2634 * the tape drive is in it. We assume that
2635 * it's a fatal error of some kind because we
2636 * can't show that it wasn't. We will make it
2637 * look like selection timeout since that is
2638 * the most common reason for this to occur,
2639 * and it's severe enough.
2640 */
2641
2642 cmd->result = DID_NO_CONNECT << 16;
2643 }
2644 break;
2645
2646 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2647 break;
2648 case CMD_DATA_OVERRUN:
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002649 dev_warn(&h->pdev->dev,
2650 "CDB %16phN data overrun\n", cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002651 break;
2652 case CMD_INVALID: {
2653 /* print_bytes(cp, sizeof(*cp), 1, 0);
2654 print_cmd(cp); */
2655 /* We get CMD_INVALID if you address a non-existent device
2656 * instead of a selection timeout (no response). You will
2657 * see this if you yank out a drive, then try to access it.
2658 * This is kind of a shame because it means that any other
2659 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2660 * missing target. */
2661 cmd->result = DID_NO_CONNECT << 16;
2662 }
2663 break;
2664 case CMD_PROTOCOL_ERR:
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05002665 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002666 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2667 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002668 break;
2669 case CMD_HARDWARE_ERR:
2670 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002671 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2672 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002673 break;
2674 case CMD_CONNECTION_LOST:
2675 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002676 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2677 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002678 break;
2679 case CMD_ABORTED:
Don Brace08ec46f2017-05-04 17:51:49 -05002680 cmd->result = DID_ABORT << 16;
2681 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002682 case CMD_ABORT_FAILED:
2683 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002684 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2685 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002686 break;
2687 case CMD_UNSOLICITED_ABORT:
Stephen M. Cameronf6e76052011-07-26 11:08:52 -05002688 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002689 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2690 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002691 break;
2692 case CMD_TIMEOUT:
2693 cmd->result = DID_TIME_OUT << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002694 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2695 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002696 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002697 case CMD_UNABORTABLE:
2698 cmd->result = DID_ERROR << 16;
2699 dev_warn(&h->pdev->dev, "Command unabortable\n");
2700 break;
Stephen Cameron9437ac42015-04-23 09:32:16 -05002701 case CMD_TMF_STATUS:
2702 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2703 cmd->result = DID_ERROR << 16;
2704 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002705 case CMD_IOACCEL_DISABLED:
2706 /* This only handles the direct pass-through case since RAID
2707 * offload is handled above. Just attempt a retry.
2708 */
2709 cmd->result = DID_SOFT_ERROR << 16;
2710 dev_warn(&h->pdev->dev,
2711 "cp %p had HP SSD Smart Path error\n", cp);
2712 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002713 default:
2714 cmd->result = DID_ERROR << 16;
2715 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2716 cp, ei->CommandStatus);
2717 }
Webb Scales8a0ff922015-04-23 09:34:11 -05002718
2719 return hpsa_cmd_free_and_done(h, cp, cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002720}
2721
static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}

static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}

#define NO_TIMEOUT ((unsigned long) -1)
#define DEFAULT_TIMEOUT 30000 /* milliseconds */
static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	__enqueue_cmd_and_start_io(h, c, reply_queue);
	if (timeout_msecs == NO_TIMEOUT) {
		/* TODO: get rid of this no-timeout thing */
		wait_for_completion_io(&wait);
		return IO_OK;
	}
	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
		return -ETIMEDOUT;
	}
	return IO_OK;
}

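/*
 * If the controller has already been declared locked up, don't touch the
 * hardware at all; synthesize CMD_CTLR_LOCKUP in err_info instead so the
 * caller sees a completed-with-error command.
 */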
static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
				   int reply_queue, unsigned long timeout_msecs)
{
	if (unlikely(lockup_detected(h))) {
		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
		return IO_OK;
	}
	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}

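/*
 * The lockup flag lives in a per-cpu variable so the hot submission and
 * completion paths can read it locklessly on the local cpu.
 */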
static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}

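/*
 * Retry policy for internal commands that come back with a unit attention
 * or busy status: the first three retries are immediate, after which the
 * delay doubles per attempt (10, 20, 40, ... ms, capped at 1000 ms), for
 * at most MAX_DRIVER_CMD_RETRIES tries.
 */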
#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
{
	int backoff_time = 10, retry_count = 0;
	int rc;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					     timeout_msecs);
		if (rc)
			break;
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
	if (retry_count > MAX_DRIVER_CMD_RETRIES)
		rc = -EIO;
	return rc;
}

static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
				struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
		 txt, lun, cdb);
}

static void hpsa_scsi_interpret_error(struct ctlr_info *h,
			struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	u8 sense_key, asc, ascq;
	int sense_len;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->SenseLen > sizeof(ei->SenseInfo))
			sense_len = sizeof(ei->SenseInfo);
		else
			sense_len = ei->SenseLen;
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}

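/*
 * Most of the probe-time helpers below follow the same pattern:
 * cmd_alloc(), fill_cmd(), hpsa_scsi_do_simple_cmd_with_retry(), then
 * check err_info (CMD_DATA_UNDERRUN is expected and benign for short
 * reads) and cmd_free().
 */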
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *              == c2->scsi_nexus;      */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:		/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}

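/*
 * Reset protocol, roughly: walk the command pool and tag every active
 * command aimed at the device with reset_pending; each tagged, non-idle
 * command bumps dev->reset_cmds_out.  After the reset is sent, sleep
 * until the completion path has drained that count, or the controller
 * locks up.
 */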
static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
{
	int i;
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
			unsigned long flags;

			/*
			 * Mark the target command as having a reset pending,
			 * then lock a lock so that the command cannot complete
			 * while we're considering it.  If the command is not
			 * idle then count it; otherwise revoke the event.
			 */
			c->reset_pending = dev;
			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				atomic_inc(&dev->reset_cmds_out);
			else
				c->reset_pending = NULL;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
	if (!rc)
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->reset_cmds_out) == 0 ||
			lockup_detected(h));

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (unlikely(rc))
		atomic_set(&dev->reset_cmds_out, 0);
	else
		rc = wait_for_device_to_become_ready(h, scsi3addr, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;

	if (!hpsa_vpd_page_supported(h, scsi3addr,
		HPSA_VPD_LV_DEVICE_GEOMETRY))
		goto exit;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
		HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);

	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
exit:
	kfree(buf);
	return;
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif

static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}

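/*
 * BMIC commands address a physical drive with a 16-bit index that is
 * split across the CDB: low byte in CDB[2], high byte in CDB[9], as in
 * the helpers below.
 */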
static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_sense_subsystem_info *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
				PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_controller(struct ctlr_info *h,
	struct bmic_identify_controller *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
	unsigned char scsi3addr[], u16 bmic_device_index,
	struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						DEFAULT_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);

	return rc;
}

/*
 * get enclosure information
 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
 * Uses id_physical_device to determine the box_index.
 */
static void hpsa_get_enclosure_info(struct ctlr_info *h,
			unsigned char *scsi3addr,
			struct ReportExtendedLUNdata *rlep, int rle_index,
			struct hpsa_scsi_dev_t *encl_dev)
{
	int rc = -1;
	struct CommandList *c = NULL;
	struct ErrorInfo *ei = NULL;
	struct bmic_sense_storage_box_params *bssbp = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
	u16 bmic_device_index = 0;

	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);

	if (encl_dev->target == -1 || encl_dev->lun == -1) {
		rc = IO_OK;
		goto out;
	}

	if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
		rc = IO_OK;
		goto out;
	}

	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
	if (!bssbp)
		goto out;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		goto out;

	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
						id_phys, sizeof(*id_phys));
	if (rc) {
		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
			__func__, encl_dev->external, bmic_device_index);
		goto out;
	}

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);

	if (rc)
		goto out;

	if (id_phys->phys_connector[1] == 'E')
		c->Request.CDB[5] = id_phys->box_index;
	else
		c->Request.CDB[5] = 0;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						DEFAULT_TIMEOUT);
	if (rc)
		goto out;

	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		rc = -1;
		goto out;
	}

	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
		bssbp->phys_connector, sizeof(bssbp->phys_connector));

	rc = IO_OK;
out:
	kfree(bssbp);
	kfree(id_phys);

	if (c)
		cmd_free(h, c);

	if (rc != IO_OK)
		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
			"Error, could not get enclosure information\n");
}

static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
						unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physdev;
	u32 nphysicals;
	u64 sa = 0;
	int i;

	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
	if (!physdev)
		return 0;

	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		kfree(physdev);
		return 0;
	}
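	/* Each extended report-LUNs entry is 24 bytes. */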
	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;

	for (i = 0; i < nphysicals; i++)
		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
			break;
		}

	kfree(physdev);

	return sa;
}

static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
					struct hpsa_scsi_dev_t *dev)
{
	int rc;
	u64 sa = 0;

	if (is_hba_lunid(scsi3addr)) {
		struct bmic_sense_subsystem_info *ssi;

		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
		if (!ssi)
			return;

		rc = hpsa_bmic_sense_subsystem_information(h,
					scsi3addr, 0, ssi, sizeof(*ssi));
		if (rc == 0) {
			sa = get_unaligned_be64(ssi->primary_world_wide_id);
			h->sas_address = sa;
		}

		kfree(ssi);
	} else
		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);

	dev->sas_address = sa;
}

/* Check whether the device advertises support for a given VPD page */
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return false;
exit_supported:
	kfree(buf);
	return true;
}

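/*
 * HP SSD Smart Path (ioaccel) state is advertised per logical volume in
 * a vendor-specific VPD page: one bit says offload is configured, one
 * says it is enabled.  Offload is only actually enabled if a valid RAID
 * map can also be fetched.
 */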
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
	this_device->offload_to_be_enabled = this_device->offload_enabled;
out:
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int index, int buflen)
{
	int rc;
	unsigned char *buf;

	/* Does controller have VPD for device id? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
		return 1; /* not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
		HPSA_VPD_LV_DEVICE_ID, buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[8], buflen);
	}

	kfree(buf);

	return rc; /* 0 - got id, otherwise, didn't */
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -EAGAIN;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -EIO;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			if (!h->legacy_board) {
				dev_err(&h->pdev->dev,
					"report luns requested format %u, got %u\n",
					extended_response,
					rld->extended_response_flag);
				rc = -EINVAL;
			} else
				rc = -EOPNOTSUPP;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}

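/*
 * Try the extended (24-byte entry) REPORT PHYSICAL LUNS format first;
 * legacy boards that reject it get the plain 8-byte format instead, with
 * each lunid repacked into the extended layout so callers see one format.
 */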
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	int rc;
	struct ReportLUNdata *lbuf;

	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
				      HPSA_REPORT_PHYS_EXTENDED);
	if (rc != -EOPNOTSUPP)
		return rc;

	/* REPORT PHYS EXTENDED is not supported */
	lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
	if (!lbuf)
		return -ENOMEM;

	rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
	if (!rc) {
		int i;
		u32 nphys;

		/* Copy ReportLUNdata header */
		memcpy(buf, lbuf, 8);
		nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
		for (i = 0; i < nphys; i++)
			memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
	}
	kfree(lbuf);
	return rc;
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

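/*
 * Roughly: issue a TEST UNIT READY and capture its sense data, then ask
 * the VPD status page for the precise not-ready reason; if that page is
 * not supported, the ASC/ASCQ from the TUR stands in for it.
 */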
/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static unsigned char hpsa_volume_offline(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					DEFAULT_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_FAILED:
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return HPSA_LV_OK;
}

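/*
 * Fill in a device entry from standard INQUIRY data: vendor/model/rev,
 * the page-0x83 device id, and, for logical volumes, RAID level, ioaccel
 * status and offline state.  Also detects the OBDR tape signature when
 * asked to.
 */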
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;
	int rc = 0;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff) {
		rc = -ENOMEM;
		goto bail_out;
	}

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		dev_err(&h->pdev->dev,
			"%s: inquiry failed, device will be skipped.\n",
			__func__);
		rc = HPSA_INQUIRY_FAILED;
		goto bail_out;
	}

	scsi_sanitize_inquiry_string(&inq_buff[8], 8);
	scsi_sanitize_inquiry_string(&inq_buff[16], 16);

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	this_device->rev = inq_buff[2];
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
		sizeof(this_device->device_id)) < 0)
		dev_err(&h->pdev->dev,
			"hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
			h->ctlr, __func__,
			h->scsi_host->host_no,
			this_device->target, this_device->lun,
			scsi_device_type(this_device->devtype),
			this_device->model);

	if ((this_device->devtype == TYPE_DISK ||
		this_device->devtype == TYPE_ZBC) &&
		is_logical_dev_addr_mode(scsi3addr)) {
		unsigned char volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
		    h->legacy_board) {
			/*
			 * Legacy boards might not support volume status
			 */
			dev_info(&h->pdev->dev,
				"C0:T%d:L%d Volume status not available, assuming online.\n",
				this_device->target, this_device->lun);
			volume_offline = 0;
		}
		this_device->volume_offline = volume_offline;
		if (volume_offline == HPSA_LV_FAILED) {
			rc = HPSA_LV_FAILED;
			dev_err(&h->pdev->dev,
				"%s: LV failed, device will be skipped.\n",
				__func__);
			goto bail_out;
		}
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->offload_to_be_enabled = 0;
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (this_device->external)
		this_device->queue_depth = EXTERNAL_QD;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return rc;
}

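/*
 * Illustrative sketch, not used by the driver: how the INQUIRY bytes
 * consumed above map onto fields (per SPC).  Byte 0 bits 4:0 carry the
 * peripheral device type, byte 2 the version (kept in this_device->rev),
 * bytes 8-15 the vendor id, bytes 16-31 the product id, and the OBDR
 * "$DR-10" signature sits at offset 43.  The helper name is hypothetical.
 */
static inline bool example_is_obdr_inquiry(const unsigned char *inq)
{
	return (inq[0] & 0x1f) == TYPE_ROM &&
		memcmp(&inq[OBDR_SIG_OFFSET], OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0;
}
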
/*
 * Helper function to assign bus, target, lun mapping of devices.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = get_unaligned_le32(lunaddrbytes);

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes)) {
			int bus = HPSA_HBA_BUS;

			if (!device->rev)
				bus = HPSA_LEGACY_HBA_BUS;
			hpsa_set_bus_target_lun(device,
					bus, 0, lunid & 0x3fff);
		} else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device,
					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
		return;
	}
	/* It's a logical device */
	if (device->external) {
		hpsa_set_bus_target_lun(device,
			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
			lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
				0, lunid & 0x3fff);
}

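/*
 * Worked example (sketch only): for a local logical volume the 32-bit
 * LUN ID 0x00004002 decodes as bus HPSA_RAID_VOLUME_BUS, target 0,
 * lun 0x00004002 & 0x3fff = 2; for an external RAID volume ID
 * 0x00070105 the target is (0x00070105 >> 16) & 0x3fff = 7 and the
 * lun is 0x00070105 & 0x00ff = 5.
 */
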
static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlocal_logicals)
{
	/* In report logicals, local logicals are listed first,
	 * then any externals.
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return 0;

	if (i < logicals_start)
		return 0;

	/* i is in logicals range, but still within local logicals */
	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
		return 0;

	return 1; /* it's an external lun */
}

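/*
 * Worked example (sketch only): with raid_ctlr_position == 0,
 * nphysicals == 4 and nlocal_logicals == 2, index 0 is the controller,
 * indices 1-4 are physical, 5-6 are the local logicals
 * ((i - 4 - 1) < 2), and any i >= 7 decodes as an external lun.
 */
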
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

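/*
 * Worked example (sketch only): the report-LUNs header stores the list
 * length in big-endian bytes.  Extended physical entries are 24 bytes
 * and logical entries are 8 bytes, so a physical LUNListLength of 72
 * yields 72 / 24 = 3 physical LUNs, and a logical LUNListLength of 16
 * yields 16 / 8 = 2 logical LUNs.
 */
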
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle;

	rle = &rlep->LUN[rle_index];

	dev->ioaccel_handle = rle->ioaccel_handle;
	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
}

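/*
 * Worked example (sketch only): if BMIC reports a
 * current_queue_depth_limit of 32, the exposed queue depth becomes
 * 32 - DRIVE_CMDS_RESERVED_FOR_FW = 30; if the BMIC identify fails,
 * the conservative fallback DRIVE_QUEUE_DEPTH of 7 is used instead.
 */
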
static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
	struct ReportExtendedLUNdata *rlep, int rle_index,
	struct bmic_identify_physical_device *id_phys)
{
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
		this_device->hba_ioaccel_enabled = 1;

	memcpy(&this_device->active_path_index,
		&id_phys->active_path_number,
		sizeof(this_device->active_path_index));
	memcpy(&this_device->path_map,
		&id_phys->redundant_path_present_map,
		sizeof(this_device->path_map));
	memcpy(&this_device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(this_device->box));
	memcpy(&this_device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(this_device->phys_connector));
	memcpy(&this_device->bay,
		&id_phys->phys_bay_in_box,
		sizeof(this_device->bay));
}

/* get number of local logical disks. */
static int hpsa_set_local_logical_count(struct ctlr_info *h,
	struct bmic_identify_controller *id_ctlr,
	u32 *nlocals)
{
	int rc;

	if (!id_ctlr) {
		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
			__func__);
		return -ENOMEM;
	}
	memset(id_ctlr, 0, sizeof(*id_ctlr));
	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
	if (!rc)
		if (id_ctlr->configured_logical_drive_count < 256)
			*nlocals = id_ctlr->configured_logical_drive_count;
		else
			*nlocals = le16_to_cpu(
				id_ctlr->extended_logical_unit_count);
	else
		*nlocals = -1;
	return rc;
}

static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
{
	struct bmic_identify_physical_device *id_phys;
	bool is_spare = false;
	int rc;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		return false;

	rc = hpsa_bmic_id_physical_device(h,
					lunaddrbytes,
					GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
					id_phys, sizeof(*id_phys));
	if (rc == 0)
		is_spare = (id_phys->more_flags >> 6) & 0x01;

	kfree(id_phys);
	return is_spare;
}

#define RPL_DEV_FLAG_NON_DISK 0x1
#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4

#define BMIC_DEVICE_TYPE_ENCLOSURE 6

static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
	struct ext_report_lun_entry *rle)
{
	u8 device_flags;
	u8 device_type;

	if (!MASKED_DEVICE(lunaddrbytes))
		return false;

	device_flags = rle->device_flags;
	device_type = rle->device_type;

	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
			return false;
		return true;
	}

	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
		return false;

	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
		return false;

	/*
	 * Spares may be spun down, we do not want to
	 * do an Inquiry to a RAID set spare drive as
	 * that would have them spun up, that is a
	 * performance hit because I/O to the RAID device
	 * stops while the spin up occurs which can take
	 * over 50 seconds.
	 */
	if (hpsa_is_disk_spare(h, lunaddrbytes))
		return true;

	return false;
}

static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct bmic_identify_controller *id_ctlr = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 nlocal_logicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	bool physical_device;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys || !id_ctlr) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals)) {
		h->drv_req_rescan = 1;
		goto out;
	}

	/* Set number of local logicals (non PTRAID) */
	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
		dev_warn(&h->pdev->dev,
			"%s: Can't determine number of local logical devices.\n",
			__func__);
	}

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				" %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);
		bool skip_device = false;

		physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* Determine if this is a lun from an external target array */
		tmpdevice->external =
			figure_external_status(h, raid_ctlr_position, i,
						nphysicals, nlocal_logicals);

		/*
		 * Skip over some devices such as a spare.
		 */
		if (!tmpdevice->external && physical_device) {
			skip_device = hpsa_skip_device(h, lunaddrbytes,
					&physdev_list->LUN[phys_dev_index]);
			if (skip_device)
				continue;
		}

		/* Get device type, vendor, model, device id */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			h->drv_req_rescan = 1;
			continue;
		}

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		/* Turn on discovery_polling if there are ext target devices.
		 * Event-based change notification is unreliable for those.
		 */
		if (!h->discovery_polling) {
			if (tmpdevice->external) {
				h->discovery_polling = 1;
				dev_info(&h->pdev->dev,
					"External target, activate discovery polling.\n");
			}
		}


		*this_device = *tmpdevice;
		this_device->physical_device = physical_device;

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;


		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
		if (this_device->physical_device && this_device->expose_device)
			hpsa_get_sas_address(h, lunaddrbytes, this_device);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
		case TYPE_ZBC:
			if (this_device->physical_device) {
				/* The disk is in HBA mode. */
				/* Never use RAID mapper in HBA mode. */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (!this_device->external)
				hpsa_get_enclosure_info(h, lunaddrbytes,
						physdev_list, phys_dev_index,
						this_device);
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}

	if (h->sas_host == NULL) {
		int rc = 0;

		rc = hpsa_add_sas_host(h);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Could not add sas host %d\n", rc);
			goto out;
		}
	}

	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_ctlr);
	kfree(id_phys);
}

static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
	struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}

/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained, last_sg;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up at each of
	 * the entries in the one list.
	 */
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	last_sg = scsi_sg_count(cmd) - 1;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}

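/*
 * Worked example (sketch only): with h->max_cmd_sg_entries == 32 and a
 * command mapping to 40 DMA segments, the first list holds
 * 32 - 1 = 31 entries, the chain block holds the remaining
 * 40 - 31 = 9, and SGTotal is reported as 40 + 1 = 41 to account for
 * the chain descriptor itself.
 */
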
#define BUFLEN 128
static inline void warn_zero_length_transfer(struct ctlr_info *h,
		u8 *cdb, int cdb_len,
		const char *func)
{
	char buf[BUFLEN];
	int outlen;
	int i;

	outlen = scnprintf(buf, BUFLEN,
			"%s: Blocking zero-length request: CDB:", func);
	for (i = 0; i < cdb_len; i++)
		outlen += scnprintf(buf+outlen, BUFLEN - outlen,
				"%02hhx", cdb[i]);
	dev_warn(&h->pdev->dev, "%s\n", buf);
}

#define IO_ACCEL_INELIGIBLE 1
/* zero-length transfers trigger hardware errors. */
static bool is_zero_length_transfer(u8 *cdb)
{
	u32 block_cnt;

	/* Block zero-length transfer sizes on certain commands. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		block_cnt = get_unaligned_be16(&cdb[7]);
		break;
	case READ_12:
	case WRITE_12:
	case VERIFY_12:		/* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		block_cnt = get_unaligned_be32(&cdb[6]);
		break;
	case READ_16:
	case WRITE_16:
	case VERIFY_16:		/* 0x8F */
		block_cnt = get_unaligned_be32(&cdb[10]);
		break;
	default:
		return false;
	}

	return block_cnt == 0;
}

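/*
 * Worked example (sketch only): a WRITE_10 CDB whose transfer-length
 * bytes 7-8 are 00 00 encodes zero blocks, so is_zero_length_transfer()
 * returns true and the request is kept off the ioaccel path.
 */
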
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
		/* fall through */
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((cdb[1] & 0x1F) << 16) |
				(cdb[2] << 8) |
				cdb[3]);
			block_cnt = cdb[4];
			if (block_cnt == 0)
				block_cnt = 256;
		} else {
			BUG_ON(*cdb_len != 12);
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}

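/*
 * Worked example (sketch only): a READ_6 CDB of 08 01 23 45 00 00
 * carries LBA (0x01 & 0x1F) << 16 | 0x23 << 8 | 0x45 = 0x012345 and a
 * transfer-length byte of 0, which means 256 blocks.  The fixup
 * rewrites it as READ_10 with LBA 0x00012345 in bytes 2-5, block count
 * 0x0100 in bytes 7-8, and *cdb_len becomes 10.
 */
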
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	if (!dev)
		return -1;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}

/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size)/ 512)
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case READ_6:
	case WRITE_6:
		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
				(cmd->cmnd[2] << 8) |
				cmd->cmnd[3]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}

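/*
 * Worked example (sketch only): for a volume with a 4096-byte block
 * size, an I/O starting at LBA 100 gets tweak 100 * 4096 / 512 = 800;
 * tweak_lower receives the low 32 bits of that value and tweak_upper
 * the high 32 bits.
 */
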
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	if (!cmd->device)
		return -1;

	if (!cmd->device->hostdata)
		return -1;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0x80;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	if (!c->scsi_cmd->device)
		return -1;

	if (!c->scsi_cmd->device->hostdata)
		return -1;

	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}

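/*
 * Worked example (sketch only): with phys_disk->queue_depth == 30, the
 * 31st concurrent ioaccel command sees atomic_inc_return() == 31 > 30,
 * decrements the counter back, and returns IO_ACCEL_INELIGIBLE so the
 * request falls back to the normal RAID path.
 */
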
static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}

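/*
 * Worked example (sketch only): for a 3-way R1-ADM map with
 * data_disks_per_row == 2 and layout_map_count == 3, a starting
 * map_index of 1 with offload_to_mirror == 2 walks 1 -> 3 -> 5,
 * stopping once *map_index / 2 equals the requested mirror group 2.
 */
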
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004941/*
4942 * Attempt to perform offload RAID mapping for a logical volume I/O.
4943 */
4944static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4945 struct CommandList *c)
4946{
4947 struct scsi_cmnd *cmd = c->scsi_cmd;
4948 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4949 struct raid_map_data *map = &dev->raid_map;
4950 struct raid_map_disk_data *dd = &map->data[0];
4951 int is_write = 0;
4952 u32 map_index;
4953 u64 first_block, last_block;
4954 u32 block_cnt;
4955 u32 blocks_per_row;
4956 u64 first_row, last_row;
4957 u32 first_row_offset, last_row_offset;
4958 u32 first_column, last_column;
Scott Teel6b80b182014-02-18 13:56:55 -06004959 u64 r0_first_row, r0_last_row;
4960 u32 r5or6_blocks_per_row;
4961 u64 r5or6_first_row, r5or6_last_row;
4962 u32 r5or6_first_row_offset, r5or6_last_row_offset;
4963 u32 r5or6_first_column, r5or6_last_column;
4964 u32 total_disks_per_row;
4965 u32 stripesize;
4966 u32 first_group, last_group, current_group;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004967 u32 map_row;
4968 u32 disk_handle;
4969 u64 disk_block;
4970 u32 disk_block_cnt;
4971 u8 cdb[16];
4972 u8 cdb_len;
Don Brace2b08b3e2015-01-23 16:41:09 -06004973 u16 strip_size;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004974#if BITS_PER_LONG == 32
4975 u64 tmpdiv;
4976#endif
Scott Teel6b80b182014-02-18 13:56:55 -06004977 int offload_to_mirror;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004978
Don Brace45e596c2016-09-09 16:30:42 -05004979 if (!dev)
4980 return -1;
4981
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004982 /* check for valid opcode, get LBA and block count */
4983 switch (cmd->cmnd[0]) {
4984 case WRITE_6:
4985 is_write = 1;
4986 case READ_6:
Mahesh Rajashekharaabbada72016-09-16 14:54:23 -05004987 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4988 (cmd->cmnd[2] << 8) |
4989 cmd->cmnd[3]);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004990 block_cnt = cmd->cmnd[4];
Stephen M. Cameron3fa89a02014-07-03 10:18:14 -05004991 if (block_cnt == 0)
4992 block_cnt = 256;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004993 break;
4994 case WRITE_10:
4995 is_write = 1;
4996 case READ_10:
4997 first_block =
4998 (((u64) cmd->cmnd[2]) << 24) |
4999 (((u64) cmd->cmnd[3]) << 16) |
5000 (((u64) cmd->cmnd[4]) << 8) |
5001 cmd->cmnd[5];
5002 block_cnt =
5003 (((u32) cmd->cmnd[7]) << 8) |
5004 cmd->cmnd[8];
5005 break;
5006 case WRITE_12:
5007 is_write = 1;
5008 case READ_12:
5009 first_block =
5010 (((u64) cmd->cmnd[2]) << 24) |
5011 (((u64) cmd->cmnd[3]) << 16) |
5012 (((u64) cmd->cmnd[4]) << 8) |
5013 cmd->cmnd[5];
5014 block_cnt =
5015 (((u32) cmd->cmnd[6]) << 24) |
5016 (((u32) cmd->cmnd[7]) << 16) |
5017 (((u32) cmd->cmnd[8]) << 8) |
5018 cmd->cmnd[9];
5019 break;
5020 case WRITE_16:
5021 is_write = 1;
5022 case READ_16:
5023 first_block =
5024 (((u64) cmd->cmnd[2]) << 56) |
5025 (((u64) cmd->cmnd[3]) << 48) |
5026 (((u64) cmd->cmnd[4]) << 40) |
5027 (((u64) cmd->cmnd[5]) << 32) |
5028 (((u64) cmd->cmnd[6]) << 24) |
5029 (((u64) cmd->cmnd[7]) << 16) |
5030 (((u64) cmd->cmnd[8]) << 8) |
5031 cmd->cmnd[9];
5032 block_cnt =
5033 (((u32) cmd->cmnd[10]) << 24) |
5034 (((u32) cmd->cmnd[11]) << 16) |
5035 (((u32) cmd->cmnd[12]) << 8) |
5036 cmd->cmnd[13];
5037 break;
5038 default:
5039 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
5040 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005041 last_block = first_block + block_cnt - 1;
5042
5043 /* check for write to non-RAID-0 */
5044 if (is_write && dev->raid_level != 0)
5045 return IO_ACCEL_INELIGIBLE;
5046
5047 /* check for invalid block or wraparound */
Don Brace2b08b3e2015-01-23 16:41:09 -06005048 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5049 last_block < first_block)
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005050 return IO_ACCEL_INELIGIBLE;
5051
5052 /* calculate stripe information for the request */
Don Brace2b08b3e2015-01-23 16:41:09 -06005053 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5054 le16_to_cpu(map->strip_size);
5055 strip_size = le16_to_cpu(map->strip_size);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005056#if BITS_PER_LONG == 32
5057 tmpdiv = first_block;
5058 (void) do_div(tmpdiv, blocks_per_row);
5059 first_row = tmpdiv;
5060 tmpdiv = last_block;
5061 (void) do_div(tmpdiv, blocks_per_row);
5062 last_row = tmpdiv;
5063 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5064 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5065 tmpdiv = first_row_offset;
Don Brace2b08b3e2015-01-23 16:41:09 -06005066 (void) do_div(tmpdiv, strip_size);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005067 first_column = tmpdiv;
5068 tmpdiv = last_row_offset;
Don Brace2b08b3e2015-01-23 16:41:09 -06005069 (void) do_div(tmpdiv, strip_size);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005070 last_column = tmpdiv;
5071#else
5072 first_row = first_block / blocks_per_row;
5073 last_row = last_block / blocks_per_row;
5074 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5075 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
Don Brace2b08b3e2015-01-23 16:41:09 -06005076 first_column = first_row_offset / strip_size;
5077 last_column = last_row_offset / strip_size;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005078#endif
5079
5080 /* if this isn't a single row/column then give to the controller */
5081 if ((first_row != last_row) || (first_column != last_column))
5082 return IO_ACCEL_INELIGIBLE;
5083
5084 /* proceeding with driver mapping */
Don Brace2b08b3e2015-01-23 16:41:09 -06005085 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5086 le16_to_cpu(map->metadata_disks_per_row);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005087 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
Don Brace2b08b3e2015-01-23 16:41:09 -06005088 le16_to_cpu(map->row_cnt);
Scott Teel6b80b182014-02-18 13:56:55 -06005089 map_index = (map_row * total_disks_per_row) + first_column;
5090
5091 switch (dev->raid_level) {
5092 case HPSA_RAID_0:
5093 break; /* nothing special to do */
5094 case HPSA_RAID_1:
5095 /* Handles load balance across RAID 1 members.
5096 * (2-drive R1 and R10 with even # of drives.)
5097 * Appropriate for SSDs, not optimal for HDDs
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005098 */
Don Brace2b08b3e2015-01-23 16:41:09 -06005099 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005100 if (dev->offload_to_mirror)
Don Brace2b08b3e2015-01-23 16:41:09 -06005101 map_index += le16_to_cpu(map->data_disks_per_row);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005102 dev->offload_to_mirror = !dev->offload_to_mirror;
Scott Teel6b80b182014-02-18 13:56:55 -06005103 break;
5104 case HPSA_RAID_ADM:
5105 /* Handles N-way mirrors (R1-ADM)
5106 * and R10 with # of drives divisible by 3.)
5107 */
Don Brace2b08b3e2015-01-23 16:41:09 -06005108 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
Scott Teel6b80b182014-02-18 13:56:55 -06005109
5110 offload_to_mirror = dev->offload_to_mirror;
5111 raid_map_helper(map, offload_to_mirror,
5112 &map_index, &current_group);
5113 /* set mirror group to use next time */
5114 offload_to_mirror =
Don Brace2b08b3e2015-01-23 16:41:09 -06005115 (offload_to_mirror >=
5116 le16_to_cpu(map->layout_map_count) - 1)
Scott Teel6b80b182014-02-18 13:56:55 -06005117 ? 0 : offload_to_mirror + 1;
Scott Teel6b80b182014-02-18 13:56:55 -06005118 dev->offload_to_mirror = offload_to_mirror;
5119 /* Avoid direct use of dev->offload_to_mirror within this
5120 * function since multiple threads might simultaneously
5121 * increment it beyond the range of dev->layout_map_count -1.
5122 */
5123 break;
5124 case HPSA_RAID_5:
5125 case HPSA_RAID_6:
Don Brace2b08b3e2015-01-23 16:41:09 -06005126 if (le16_to_cpu(map->layout_map_count) <= 1)
Scott Teel6b80b182014-02-18 13:56:55 -06005127 break;
5128
5129 /* Verify first and last block are in same RAID group */
5130 r5or6_blocks_per_row =
Don Brace2b08b3e2015-01-23 16:41:09 -06005131 le16_to_cpu(map->strip_size) *
5132 le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06005133 BUG_ON(r5or6_blocks_per_row == 0);
Don Brace2b08b3e2015-01-23 16:41:09 -06005134 stripesize = r5or6_blocks_per_row *
5135 le16_to_cpu(map->layout_map_count);
Scott Teel6b80b182014-02-18 13:56:55 -06005136#if BITS_PER_LONG == 32
5137 tmpdiv = first_block;
5138 first_group = do_div(tmpdiv, stripesize);
5139 tmpdiv = first_group;
5140 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5141 first_group = tmpdiv;
5142 tmpdiv = last_block;
5143 last_group = do_div(tmpdiv, stripesize);
5144 tmpdiv = last_group;
5145 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5146 last_group = tmpdiv;
5147#else
5148 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5149 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
Scott Teel6b80b182014-02-18 13:56:55 -06005150#endif
Stephen M. Cameron000ff7c2014-03-13 17:12:50 -05005151 if (first_group != last_group)
Scott Teel6b80b182014-02-18 13:56:55 -06005152 return IO_ACCEL_INELIGIBLE;
5153
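 /*
  * Worked example of the group math above, with made-up map values:
  * strip_size = 128 and data_disks_per_row = 3 give
  * r5or6_blocks_per_row = 384; layout_map_count = 2 gives
  * stripesize = 768.  For first_block = 900 and last_block = 1000:
  *
  *	first_group = (900 % 768) / 384 = 132 / 384 = 0
  *	last_group  = (1000 % 768) / 384 = 232 / 384 = 0
  *
  * Both blocks land in group 0, so the request remains eligible.
  */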
5154 /* Verify request is in a single row of RAID 5/6 */
5155#if BITS_PER_LONG == 32
5156 tmpdiv = first_block;
5157 (void) do_div(tmpdiv, stripesize);
5158 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5159 tmpdiv = last_block;
5160 (void) do_div(tmpdiv, stripesize);
5161 r5or6_last_row = r0_last_row = tmpdiv;
5162#else
5163 first_row = r5or6_first_row = r0_first_row =
5164 first_block / stripesize;
5165 r5or6_last_row = r0_last_row = last_block / stripesize;
5166#endif
5167 if (r5or6_first_row != r5or6_last_row)
5168 return IO_ACCEL_INELIGIBLE;
5169
5170
5171 /* Verify request is in a single column */
5172#if BITS_PER_LONG == 32
5173 tmpdiv = first_block;
5174 first_row_offset = do_div(tmpdiv, stripesize);
5175 tmpdiv = first_row_offset;
5176 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5177 r5or6_first_row_offset = first_row_offset;
5178 tmpdiv = last_block;
5179 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5180 tmpdiv = r5or6_last_row_offset;
5181 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5182 tmpdiv = r5or6_first_row_offset;
5183 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
5184 first_column = r5or6_first_column = tmpdiv;
5185 tmpdiv = r5or6_last_row_offset;
5186 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
5187 r5or6_last_column = tmpdiv;
5188#else
5189 first_row_offset = r5or6_first_row_offset =
5190 (u32)((first_block % stripesize) %
5191 r5or6_blocks_per_row);
5192
5193 r5or6_last_row_offset =
5194 (u32)((last_block % stripesize) %
5195 r5or6_blocks_per_row);
5196
5197 first_column = r5or6_first_column =
Don Brace2b08b3e2015-01-23 16:41:09 -06005198 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
Scott Teel6b80b182014-02-18 13:56:55 -06005199 r5or6_last_column =
Don Brace2b08b3e2015-01-23 16:41:09 -06005200 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
Scott Teel6b80b182014-02-18 13:56:55 -06005201#endif
5202 if (r5or6_first_column != r5or6_last_column)
5203 return IO_ACCEL_INELIGIBLE;
5204
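 /*
  * Continuing the made-up example from the group check (strip_size =
  * 128, r5or6_blocks_per_row = 384, stripesize = 768, first_block =
  * 900, last_block = 1000):
  *
  *	first_row    = 900 / 768 = 1      last_row    = 1000 / 768 = 1
  *	first offset = (900 % 768) % 384 = 132
  *	last offset  = (1000 % 768) % 384 = 232
  *	first_column = 132 / 128 = 1      last_column = 232 / 128 = 1
  *
  * Same row and same column, so the request stays eligible.
  */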
5205 /* Request is eligible */
5206 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
Don Brace2b08b3e2015-01-23 16:41:09 -06005207 le16_to_cpu(map->row_cnt);
Scott Teel6b80b182014-02-18 13:56:55 -06005208
5209 map_index = (first_group *
Don Brace2b08b3e2015-01-23 16:41:09 -06005210 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
Scott Teel6b80b182014-02-18 13:56:55 -06005211 (map_row * total_disks_per_row) + first_column;
5212 break;
5213 default:
5214 return IO_ACCEL_INELIGIBLE;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005215 }
Scott Teel6b80b182014-02-18 13:56:55 -06005216
Stephen Cameron07543e02015-01-23 16:44:14 -06005217 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5218 return IO_ACCEL_INELIGIBLE;
5219
Don Brace03383732015-01-23 16:43:30 -06005220 c->phys_disk = dev->phys_disk[map_index];
Don Bracec3390df2016-02-23 15:16:34 -06005221 if (!c->phys_disk)
5222 return IO_ACCEL_INELIGIBLE;
Don Brace03383732015-01-23 16:43:30 -06005223
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005224 disk_handle = dd[map_index].ioaccel_handle;
Don Brace2b08b3e2015-01-23 16:41:09 -06005225 disk_block = le64_to_cpu(map->disk_starting_blk) +
5226 first_row * le16_to_cpu(map->strip_size) +
5227 (first_row_offset - first_column *
5228 le16_to_cpu(map->strip_size));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005229 disk_block_cnt = block_cnt;
5230
5231 /* handle differing logical/physical block sizes */
5232 if (map->phys_blk_shift) {
5233 disk_block <<= map->phys_blk_shift;
5234 disk_block_cnt <<= map->phys_blk_shift;
5235 }
5236 BUG_ON(disk_block_cnt > 0xffff);
5237
5238 /* build the new CDB for the physical disk I/O */
5239 if (disk_block > 0xffffffff) {
5240 cdb[0] = is_write ? WRITE_16 : READ_16;
5241 cdb[1] = 0;
5242 cdb[2] = (u8) (disk_block >> 56);
5243 cdb[3] = (u8) (disk_block >> 48);
5244 cdb[4] = (u8) (disk_block >> 40);
5245 cdb[5] = (u8) (disk_block >> 32);
5246 cdb[6] = (u8) (disk_block >> 24);
5247 cdb[7] = (u8) (disk_block >> 16);
5248 cdb[8] = (u8) (disk_block >> 8);
5249 cdb[9] = (u8) (disk_block);
5250 cdb[10] = (u8) (disk_block_cnt >> 24);
5251 cdb[11] = (u8) (disk_block_cnt >> 16);
5252 cdb[12] = (u8) (disk_block_cnt >> 8);
5253 cdb[13] = (u8) (disk_block_cnt);
5254 cdb[14] = 0;
5255 cdb[15] = 0;
5256 cdb_len = 16;
5257 } else {
5258 cdb[0] = is_write ? WRITE_10 : READ_10;
5259 cdb[1] = 0;
5260 cdb[2] = (u8) (disk_block >> 24);
5261 cdb[3] = (u8) (disk_block >> 16);
5262 cdb[4] = (u8) (disk_block >> 8);
5263 cdb[5] = (u8) (disk_block);
5264 cdb[6] = 0;
5265 cdb[7] = (u8) (disk_block_cnt >> 8);
5266 cdb[8] = (u8) (disk_block_cnt);
5267 cdb[9] = 0;
5268 cdb_len = 10;
5269 }
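 /*
  * Example of the big-endian CDB packing above (illustrative values):
  * disk_block = 0x12345678 and disk_block_cnt = 0x20 fit the 10-byte
  * case, giving
  *
  *	cdb[] = { READ_10, 0, 0x12, 0x34, 0x56, 0x78, 0, 0x00, 0x20, 0 }
  *
  * i.e. a 4-byte LBA in cdb[2..5] and a 2-byte count in cdb[7..8].
  */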
5270 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
Don Brace03383732015-01-23 16:43:30 -06005271 dev->scsi3addr,
5272 dev->phys_disk[map_index]);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005273}
5274
Webb Scales25163bd2015-04-23 09:32:00 -05005275/*
5276 * Submit commands down the "normal" RAID stack path.
5277 * All callers of hpsa_ciss_submit must check lockup_detected
5278 * beforehand: optionally before, and always after, calling cmd_alloc.
5279 */
Stephen Cameron574f05d2015-01-23 16:43:20 -06005280static int hpsa_ciss_submit(struct ctlr_info *h,
5281 struct CommandList *c, struct scsi_cmnd *cmd,
5282 unsigned char scsi3addr[])
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005283{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005284 cmd->host_scribble = (unsigned char *) c;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005285 c->cmd_type = CMD_SCSI;
5286 c->scsi_cmd = cmd;
5287 c->Header.ReplyQueue = 0; /* unused in simple mode */
5288 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
Don Bracef2405db2015-01-23 16:43:09 -06005289 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005290
5291 /* Fill in the request block... */
5292
5293 c->Request.Timeout = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005294 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5295 c->Request.CDBLen = cmd->cmd_len;
5296 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005297 switch (cmd->sc_data_direction) {
5298 case DMA_TO_DEVICE:
Stephen M. Camerona505b862014-11-14 17:27:04 -06005299 c->Request.type_attr_dir =
5300 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005301 break;
5302 case DMA_FROM_DEVICE:
Stephen M. Camerona505b862014-11-14 17:27:04 -06005303 c->Request.type_attr_dir =
5304 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005305 break;
5306 case DMA_NONE:
Stephen M. Camerona505b862014-11-14 17:27:04 -06005307 c->Request.type_attr_dir =
5308 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005309 break;
5310 case DMA_BIDIRECTIONAL:
5311 /* This can happen if a buggy application does a scsi passthru
5312 * and sets both inlen and outlen to non-zero. (See
5313 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
5314 */
5315
Stephen M. Camerona505b862014-11-14 17:27:04 -06005316 c->Request.type_attr_dir =
5317 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005318 /* This is technically wrong, and hpsa controllers should
5319 * reject it with CMD_INVALID, which is the most correct
5320 * response, but non-fibre backends appear to let it
5321 * slide by, and give the same results as if this field
5322 * were set correctly. Either way is acceptable for
5323 * our purposes here.
5324 */
5325
5326 break;
5327
5328 default:
5329 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5330 cmd->sc_data_direction);
5331 BUG();
5332 break;
5333 }
5334
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06005335 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
Webb Scales73153fe2015-04-23 09:35:04 -05005336 hpsa_cmd_resolve_and_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005337 return SCSI_MLQUEUE_HOST_BUSY;
5338 }
5339 enqueue_cmd_and_start_io(h, c);
5340 /* the cmd will come back via the intr handler in complete_scsi_command() */
5341 return 0;
5342}
5343
Stephen Cameron360c73b2015-04-23 09:32:32 -05005344static void hpsa_cmd_init(struct ctlr_info *h, int index,
5345 struct CommandList *c)
5346{
5347 dma_addr_t cmd_dma_handle, err_dma_handle;
5348
5349 /* Zero out all of the CommandList except the last field, refcount */
5350 memset(c, 0, offsetof(struct CommandList, refcount));
5351 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5352 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5353 c->err_info = h->errinfo_pool + index;
5354 memset(c->err_info, 0, sizeof(*c->err_info));
5355 err_dma_handle = h->errinfo_pool_dhandle
5356 + index * sizeof(*c->err_info);
5357 c->cmdindex = index;
5358 c->busaddr = (u32) cmd_dma_handle;
5359 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5360 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5361 c->h = h;
Webb Scalesa58e7e52015-04-23 09:34:16 -05005362 c->scsi_cmd = SCSI_CMD_IDLE;
Stephen Cameron360c73b2015-04-23 09:32:32 -05005363}
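
/*
 * Minimal sketch of the offsetof()-based partial clear used in
 * hpsa_cmd_init() above, with a made-up struct (not the real
 * CommandList layout):
 *
 *	struct example { u32 a; u64 b; atomic_t refcount; };
 *	struct example e;
 *
 *	memset(&e, 0, offsetof(struct example, refcount));
 *
 * This zeroes 'a', 'b' and any padding before 'refcount', but leaves
 * 'refcount' itself untouched -- which is why refcount must remain the
 * last field of struct CommandList.
 */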
5364
5365static void hpsa_preinitialize_commands(struct ctlr_info *h)
5366{
5367 int i;
5368
5369 for (i = 0; i < h->nr_cmds; i++) {
5370 struct CommandList *c = h->cmd_pool + i;
5371
5372 hpsa_cmd_init(h, i, c);
5373 atomic_set(&c->refcount, 0);
5374 }
5375}
5376
5377static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5378 struct CommandList *c)
5379{
5380 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5381
Webb Scales73153fe2015-04-23 09:35:04 -05005382 BUG_ON(c->cmdindex != index);
5383
Stephen Cameron360c73b2015-04-23 09:32:32 -05005384 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5385 memset(c->err_info, 0, sizeof(*c->err_info));
5386 c->busaddr = (u32) cmd_dma_handle;
5387}
5388
Webb Scales592a0ad2015-04-23 09:32:48 -05005389static int hpsa_ioaccel_submit(struct ctlr_info *h,
5390 struct CommandList *c, struct scsi_cmnd *cmd,
5391 unsigned char *scsi3addr)
5392{
5393 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5394 int rc = IO_ACCEL_INELIGIBLE;
5395
Don Brace45e596c2016-09-09 16:30:42 -05005396 if (!dev)
5397 return SCSI_MLQUEUE_HOST_BUSY;
5398
Webb Scales592a0ad2015-04-23 09:32:48 -05005399 cmd->host_scribble = (unsigned char *) c;
5400
5401 if (dev->offload_enabled) {
5402 hpsa_cmd_init(h, c->cmdindex, c);
5403 c->cmd_type = CMD_SCSI;
5404 c->scsi_cmd = cmd;
5405 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5406 if (rc < 0) /* scsi_dma_map failed. */
5407 rc = SCSI_MLQUEUE_HOST_BUSY;
Joe Handzika3144e02015-04-23 09:32:59 -05005408 } else if (dev->hba_ioaccel_enabled) {
Webb Scales592a0ad2015-04-23 09:32:48 -05005409 hpsa_cmd_init(h, c->cmdindex, c);
5410 c->cmd_type = CMD_SCSI;
5411 c->scsi_cmd = cmd;
5412 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5413 if (rc < 0) /* scsi_dma_map failed. */
5414 rc = SCSI_MLQUEUE_HOST_BUSY;
5415 }
5416 return rc;
5417}
5418
Don Brace080ef1c2015-01-23 16:43:25 -06005419static void hpsa_command_resubmit_worker(struct work_struct *work)
5420{
5421 struct scsi_cmnd *cmd;
5422 struct hpsa_scsi_dev_t *dev;
Webb Scales8a0ff922015-04-23 09:34:11 -05005423 struct CommandList *c = container_of(work, struct CommandList, work);
Don Brace080ef1c2015-01-23 16:43:25 -06005424
5425 cmd = c->scsi_cmd;
5426 dev = cmd->device->hostdata;
5427 if (!dev) {
5428 cmd->result = DID_NO_CONNECT << 16;
Webb Scales8a0ff922015-04-23 09:34:11 -05005429 return hpsa_cmd_free_and_done(c->h, c, cmd);
Don Brace080ef1c2015-01-23 16:43:25 -06005430 }
Webb Scalesd604f532015-04-23 09:35:22 -05005431 if (c->reset_pending)
Don Braced2315ce2017-05-04 17:51:16 -05005432 return hpsa_cmd_free_and_done(c->h, c, cmd);
Webb Scales592a0ad2015-04-23 09:32:48 -05005433 if (c->cmd_type == CMD_IOACCEL2) {
5434 struct ctlr_info *h = c->h;
5435 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5436 int rc;
5437
5438 if (c2->error_data.serv_response ==
5439 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5440 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
5441 if (rc == 0)
5442 return;
5443 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5444 /*
5445 * If we get here, it means dma mapping failed.
5446 * Try again via scsi mid layer, which will
5447 * then get SCSI_MLQUEUE_HOST_BUSY.
5448 */
5449 cmd->result = DID_IMM_RETRY << 16;
Webb Scales8a0ff922015-04-23 09:34:11 -05005450 return hpsa_cmd_free_and_done(h, c, cmd);
Webb Scales592a0ad2015-04-23 09:32:48 -05005451 }
5452 /* else, fall thru and resubmit down CISS path */
5453 }
5454 }
Stephen Cameron360c73b2015-04-23 09:32:32 -05005455 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
Don Brace080ef1c2015-01-23 16:43:25 -06005456 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
5457 /*
5458 * If we get here, it means dma mapping failed. Try
5459 * again via scsi mid layer, which will then get
5460 * SCSI_MLQUEUE_HOST_BUSY.
Webb Scales592a0ad2015-04-23 09:32:48 -05005461 *
5462 * hpsa_ciss_submit will have already freed c
5463 * if it encountered a dma mapping failure.
Don Brace080ef1c2015-01-23 16:43:25 -06005464 */
5465 cmd->result = DID_IMM_RETRY << 16;
5466 cmd->scsi_done(cmd);
5467 }
5468}
5469
Stephen Cameron574f05d2015-01-23 16:43:20 -06005470/* Running in struct Scsi_Host->host_lock less mode */
5471static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5472{
5473 struct ctlr_info *h;
5474 struct hpsa_scsi_dev_t *dev;
5475 unsigned char scsi3addr[8];
5476 struct CommandList *c;
5477 int rc = 0;
5478
5479 /* Get the ptr to our adapter structure out of cmd->host. */
5480 h = sdev_to_hba(cmd->device);
Webb Scales73153fe2015-04-23 09:35:04 -05005481
5482 BUG_ON(cmd->request->tag < 0);
5483
Stephen Cameron574f05d2015-01-23 16:43:20 -06005484 dev = cmd->device->hostdata;
5485 if (!dev) {
Hannes Reinecke1ccde702016-11-18 08:32:47 +01005486 cmd->result = DID_NO_CONNECT << 16;
Don Braceba74fdc2016-04-27 17:14:17 -05005487 cmd->scsi_done(cmd);
5488 return 0;
5489 }
5490
5491 if (dev->removed) {
Stephen Cameron574f05d2015-01-23 16:43:20 -06005492 cmd->result = DID_NO_CONNECT << 16;
5493 cmd->scsi_done(cmd);
5494 return 0;
5495 }
Webb Scales73153fe2015-04-23 09:35:04 -05005496
Stephen Cameron574f05d2015-01-23 16:43:20 -06005497 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5498
5499 if (unlikely(lockup_detected(h))) {
Webb Scales25163bd2015-04-23 09:32:00 -05005500 cmd->result = DID_NO_CONNECT << 16;
Stephen Cameron574f05d2015-01-23 16:43:20 -06005501 cmd->scsi_done(cmd);
5502 return 0;
5503 }
Webb Scales73153fe2015-04-23 09:35:04 -05005504 c = cmd_tagged_alloc(h, cmd);
Stephen Cameron574f05d2015-01-23 16:43:20 -06005505
Stephen Cameron407863c2015-01-23 16:44:19 -06005506 /*
5507 * Call alternate submit routine for I/O accelerated commands.
Stephen Cameron574f05d2015-01-23 16:43:20 -06005508 * Retries always go down the normal I/O path.
5509 */
5510 if (likely(cmd->retries == 0 &&
Christoph Hellwig57292b52017-01-31 16:57:29 +01005511 !blk_rq_is_passthrough(cmd->request) &&
5512 h->acciopath_status)) {
Webb Scales592a0ad2015-04-23 09:32:48 -05005513 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5514 if (rc == 0)
5515 return 0;
5516 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
Webb Scales73153fe2015-04-23 09:35:04 -05005517 hpsa_cmd_resolve_and_free(h, c);
Webb Scales592a0ad2015-04-23 09:32:48 -05005518 return SCSI_MLQUEUE_HOST_BUSY;
Stephen Cameron574f05d2015-01-23 16:43:20 -06005519 }
5520 }
5521 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5522}
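
/*
 * To summarize the dispatch ladder above: a first-try, non-passthrough
 * command is offered to the ioaccel path first (when acciopath_status
 * is enabled); a return of IO_ACCEL_INELIGIBLE falls through to the
 * normal CISS path, while SCSI_MLQUEUE_HOST_BUSY frees the command and
 * asks the midlayer to retry later.
 */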
5523
Webb Scales8ebc9242015-01-23 16:44:50 -06005524static void hpsa_scan_complete(struct ctlr_info *h)
Stephen M. Cameron5f389362014-02-18 13:55:48 -06005525{
5526 unsigned long flags;
5527
Webb Scales8ebc9242015-01-23 16:44:50 -06005528 spin_lock_irqsave(&h->scan_lock, flags);
5529 h->scan_finished = 1;
Don Brace87b9e6a2017-03-10 14:35:17 -06005530 wake_up(&h->scan_wait_queue);
Webb Scales8ebc9242015-01-23 16:44:50 -06005531 spin_unlock_irqrestore(&h->scan_lock, flags);
Stephen M. Cameron5f389362014-02-18 13:55:48 -06005532}
5533
Stephen M. Camerona08a8472010-02-04 08:43:16 -06005534static void hpsa_scan_start(struct Scsi_Host *sh)
5535{
5536 struct ctlr_info *h = shost_to_hba(sh);
5537 unsigned long flags;
5538
Webb Scales8ebc9242015-01-23 16:44:50 -06005539 /*
5540 * Don't let rescans be initiated on a controller known to be locked
5541 * up. If the controller locks up *during* a rescan, that thread is
5542 * probably hosed, but at least we can prevent new rescan threads from
5543 * piling up on a locked up controller.
5544 */
5545 if (unlikely(lockup_detected(h)))
5546 return hpsa_scan_complete(h);
Stephen M. Cameron5f389362014-02-18 13:55:48 -06005547
Don Brace87b9e6a2017-03-10 14:35:17 -06005548 /*
5549 * If a scan is already waiting to run, no need to add another
5550 */
5551 spin_lock_irqsave(&h->scan_lock, flags);
5552 if (h->scan_waiting) {
5553 spin_unlock_irqrestore(&h->scan_lock, flags);
5554 return;
5555 }
5556
5557 spin_unlock_irqrestore(&h->scan_lock, flags);
5558
Stephen M. Camerona08a8472010-02-04 08:43:16 -06005559 /* wait until any scan already in progress is finished. */
5560 while (1) {
5561 spin_lock_irqsave(&h->scan_lock, flags);
5562 if (h->scan_finished)
5563 break;
Don Brace87b9e6a2017-03-10 14:35:17 -06005564 h->scan_waiting = 1;
Stephen M. Camerona08a8472010-02-04 08:43:16 -06005565 spin_unlock_irqrestore(&h->scan_lock, flags);
5566 wait_event(h->scan_wait_queue, h->scan_finished);
5567 /* Note: We don't need to worry about a race between this
5568 * thread and driver unload because the midlayer will
5569 * have incremented the reference count, so unload won't
5570 * happen if we're in here.
5571 */
5572 }
5573 h->scan_finished = 0; /* mark scan as in progress */
Don Brace87b9e6a2017-03-10 14:35:17 -06005574 h->scan_waiting = 0;
Stephen M. Camerona08a8472010-02-04 08:43:16 -06005575 spin_unlock_irqrestore(&h->scan_lock, flags);
5576
Webb Scales8ebc9242015-01-23 16:44:50 -06005577 if (unlikely(lockup_detected(h)))
5578 return hpsa_scan_complete(h);
Stephen M. Cameron5f389362014-02-18 13:55:48 -06005579
Don Bracebfd75462016-11-15 14:45:32 -06005580 /*
5581 * Do the scan after any reset in progress completes
5582 */
Don Bracec59d04f2017-05-04 17:51:22 -05005583 spin_lock_irqsave(&h->reset_lock, flags);
Don Bracebfd75462016-11-15 14:45:32 -06005584 if (h->reset_in_progress) {
5585 h->drv_req_rescan = 1;
Don Bracec59d04f2017-05-04 17:51:22 -05005586 spin_unlock_irqrestore(&h->reset_lock, flags);
Don Brace3b476aa22017-05-04 17:51:10 -05005587 hpsa_scan_complete(h);
Don Bracebfd75462016-11-15 14:45:32 -06005588 return;
5589 }
Don Bracec59d04f2017-05-04 17:51:22 -05005590 spin_unlock_irqrestore(&h->reset_lock, flags);
Don Bracebfd75462016-11-15 14:45:32 -06005591
Don Brace8aa60682015-11-04 15:50:01 -06005592 hpsa_update_scsi_devices(h);
Stephen M. Camerona08a8472010-02-04 08:43:16 -06005593
Webb Scales8ebc9242015-01-23 16:44:50 -06005594 hpsa_scan_complete(h);
Stephen M. Camerona08a8472010-02-04 08:43:16 -06005595}
5596
Don Brace7c0a0222015-01-23 16:41:30 -06005597static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5598{
Don Brace03383732015-01-23 16:43:30 -06005599 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5600
5601 if (!logical_drive)
5602 return -ENODEV;
Don Brace7c0a0222015-01-23 16:41:30 -06005603
5604 if (qdepth < 1)
5605 qdepth = 1;
Don Brace03383732015-01-23 16:43:30 -06005606 else if (qdepth > logical_drive->queue_depth)
5607 qdepth = logical_drive->queue_depth;
5608
5609 return scsi_change_queue_depth(sdev, qdepth);
Don Brace7c0a0222015-01-23 16:41:30 -06005610}
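
/*
 * Example of the clamping above (illustrative numbers): a requested
 * depth of 64 on a logical drive whose queue_depth is 31 is reduced
 * to 31, and a requested depth of 0 is raised to 1.
 */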
5611
Stephen M. Camerona08a8472010-02-04 08:43:16 -06005612static int hpsa_scan_finished(struct Scsi_Host *sh,
5613 unsigned long elapsed_time)
5614{
5615 struct ctlr_info *h = shost_to_hba(sh);
5616 unsigned long flags;
5617 int finished;
5618
5619 spin_lock_irqsave(&h->scan_lock, flags);
5620 finished = h->scan_finished;
5621 spin_unlock_irqrestore(&h->scan_lock, flags);
5622 return finished;
5623}
5624
Robert Elliott2946e822015-04-23 09:35:09 -05005625static int hpsa_scsi_host_alloc(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005626{
Stephen M. Cameronb7056902012-01-19 14:00:53 -06005627 struct Scsi_Host *sh;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005628
Stephen M. Cameronb7056902012-01-19 14:00:53 -06005629 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
Robert Elliott2946e822015-04-23 09:35:09 -05005630 if (sh == NULL) {
5631 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5632 return -ENOMEM;
5633 }
Stephen M. Cameronb7056902012-01-19 14:00:53 -06005634
5635 sh->io_port = 0;
5636 sh->n_io_port = 0;
5637 sh->this_id = -1;
5638 sh->max_channel = 3;
5639 sh->max_cmd_len = MAX_COMMAND_SIZE;
5640 sh->max_lun = HPSA_MAX_LUN;
5641 sh->max_id = HPSA_MAX_LUN;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05005642 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
Don Brace03383732015-01-23 16:43:30 -06005643 sh->cmd_per_lun = sh->can_queue;
Stephen M. Cameronb7056902012-01-19 14:00:53 -06005644 sh->sg_tablesize = h->maxsgentries;
Kevin Barnettd04e62b2015-11-04 15:52:34 -06005645 sh->transportt = hpsa_sas_transport_template;
Stephen M. Cameronb7056902012-01-19 14:00:53 -06005646 sh->hostdata[0] = (unsigned long) h;
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08005647 sh->irq = pci_irq_vector(h->pdev, 0);
Stephen M. Cameronb7056902012-01-19 14:00:53 -06005648 sh->unique_id = sh->irq;
Christoph Hellwig64d513a2015-10-08 09:28:04 +01005649
Robert Elliott2946e822015-04-23 09:35:09 -05005650 h->scsi_host = sh;
Stephen M. Cameronb7056902012-01-19 14:00:53 -06005651 return 0;
Robert Elliott2946e822015-04-23 09:35:09 -05005652}
Stephen M. Cameronb7056902012-01-19 14:00:53 -06005653
Robert Elliott2946e822015-04-23 09:35:09 -05005654static int hpsa_scsi_add_host(struct ctlr_info *h)
5655{
5656 int rv;
5657
5658 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5659 if (rv) {
5660 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5661 return rv;
5662 }
5663 scsi_scan_host(h->scsi_host);
5664 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005665}
5666
Webb Scalesb69324f2015-04-23 09:34:22 -05005667/*
Webb Scales73153fe2015-04-23 09:35:04 -05005668 * The block layer has already gone to the trouble of picking out a unique,
5669 * small-integer tag for this request. We use an offset from that value as
5670 * an index to select our command block. (The offset allows us to reserve the
5671 * low-numbered entries for our own uses.)
5672 */
5673static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5674{
5675 int idx = scmd->request->tag;
5676
5677 if (idx < 0)
5678 return idx;
5679
5680 /* Offset to leave space for internal cmds. */
5681 return idx + HPSA_NRESERVED_CMDS;
5682}
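
/*
 * For example, with the low HPSA_NRESERVED_CMDS indexes reserved for
 * driver-internal commands, block-layer tag 0 maps to command index
 * HPSA_NRESERVED_CMDS, tag 1 to HPSA_NRESERVED_CMDS + 1, and so on.
 */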
5683
5684/*
Webb Scalesb69324f2015-04-23 09:34:22 -05005685 * Send a TEST_UNIT_READY command to the specified LUN using the specified
5686 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5687 */
5688static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5689 struct CommandList *c, unsigned char lunaddr[],
5690 int reply_queue)
5691{
5692 int rc;
5693
5694 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5695 (void) fill_cmd(c, TEST_UNIT_READY, h,
5696 NULL, 0, 0, lunaddr, TYPE_CMD);
Don Bracec448ecf2016-04-27 17:13:51 -05005697 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
Webb Scalesb69324f2015-04-23 09:34:22 -05005698 if (rc)
5699 return rc;
5700 /* no unmap needed here because no data xfer. */
5701
5702 /* Check if the unit is already ready. */
5703 if (c->err_info->CommandStatus == CMD_SUCCESS)
5704 return 0;
5705
5706 /*
5707 * The first command sent after reset will receive "unit attention" to
5708 * indicate that the LUN has been reset...this is actually what we're
5709 * looking for (but, success is good too).
5710 */
5711 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5712 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5713 (c->err_info->SenseInfo[2] == NO_SENSE ||
5714 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5715 return 0;
5716
5717 return 1;
5718}
5719
5720/*
5721 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5722 * returns zero when the unit is ready, and non-zero when giving up.
5723 */
5724static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5725 struct CommandList *c,
5726 unsigned char lunaddr[], int reply_queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005727{
Tomas Henzl89193582014-02-21 16:25:05 -06005728 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005729 int count = 0;
5730 int waittime = 1; /* seconds */
Webb Scalesb69324f2015-04-23 09:34:22 -05005731
5732 /* Send test unit ready until device ready, or give up. */
5733 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5734
5735 /*
5736 * Wait for a bit. do this first, because if we send
5737 * the TUR right away, the reset will just abort it.
5738 */
5739 msleep(1000 * waittime);
5740
5741 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5742 if (!rc)
5743 break;
5744
5745 /* Increase wait time with each try, up to a point. */
5746 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5747 waittime *= 2;
5748
5749 dev_warn(&h->pdev->dev,
5750 "waiting %d secs for device to become ready.\n",
5751 waittime);
5752 }
5753
5754 return rc;
5755}
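
/*
 * Example of the backoff above: if HPSA_MAX_WAIT_INTERVAL_SECS were,
 * say, 30 (an illustrative value), the sleeps would run 1, 2, 4, 8,
 * 16, 32, 32, ... seconds; the doubling stops only once the current
 * value reaches the cap, so the last step may overshoot it.
 */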
5756
5757static int wait_for_device_to_become_ready(struct ctlr_info *h,
5758 unsigned char lunaddr[],
5759 int reply_queue)
5760{
5761 int first_queue;
5762 int last_queue;
5763 int rq;
5764 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005765 struct CommandList *c;
5766
Stephen Cameron45fcb862015-01-23 16:43:04 -06005767 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005768
Webb Scalesb69324f2015-04-23 09:34:22 -05005769 /*
5770 * If no specific reply queue was requested, then send the TUR
5771 * repeatedly, requesting a reply on each reply queue; otherwise execute
5772 * the loop exactly once using only the specified queue.
5773 */
5774 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5775 first_queue = 0;
5776 last_queue = h->nreply_queues - 1;
5777 } else {
5778 first_queue = reply_queue;
5779 last_queue = reply_queue;
5780 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005781
Webb Scalesb69324f2015-04-23 09:34:22 -05005782 for (rq = first_queue; rq <= last_queue; rq++) {
5783 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
Webb Scales25163bd2015-04-23 09:32:00 -05005784 if (rc)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005785 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005786 }
5787
5788 if (rc)
5789 dev_warn(&h->pdev->dev, "giving up on device.\n");
5790 else
5791 dev_warn(&h->pdev->dev, "device is ready.\n");
5792
Stephen Cameron45fcb862015-01-23 16:43:04 -06005793 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005794 return rc;
5795}
5796
5797/* Need at least one of these error handlers to keep ../scsi/hosts.c from
5798 * complaining. Doing a host- or bus-reset can't do anything good here.
5799 */
5800static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5801{
Don Bracec59d04f2017-05-04 17:51:22 -05005802 int rc = SUCCESS;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005803 struct ctlr_info *h;
5804 struct hpsa_scsi_dev_t *dev;
Scott Teel0b9b7b62015-11-04 15:51:02 -06005805 u8 reset_type;
Dan Carpenter2dc127b2015-06-04 17:47:56 +03005806 char msg[48];
Don Bracec59d04f2017-05-04 17:51:22 -05005807 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005808
5809 /* find the controller to which the command to be reset was sent */
5810 h = sdev_to_hba(scsicmd->device);
5811 if (h == NULL) /* paranoia */
5812 return FAILED;
Don Bracee3458932015-01-23 16:44:24 -06005813
Don Bracec59d04f2017-05-04 17:51:22 -05005814 spin_lock_irqsave(&h->reset_lock, flags);
5815 h->reset_in_progress = 1;
5816 spin_unlock_irqrestore(&h->reset_lock, flags);
5817
5818 if (lockup_detected(h)) {
5819 rc = FAILED;
5820 goto return_reset_status;
5821 }
Don Bracee3458932015-01-23 16:44:24 -06005822
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005823 dev = scsicmd->device->hostdata;
5824 if (!dev) {
Webb Scalesd604f532015-04-23 09:35:22 -05005825 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
Don Bracec59d04f2017-05-04 17:51:22 -05005826 rc = FAILED;
5827 goto return_reset_status;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005828 }
Webb Scales25163bd2015-04-23 09:32:00 -05005829
Don Bracec59d04f2017-05-04 17:51:22 -05005830 if (dev->devtype == TYPE_ENCLOSURE) {
5831 rc = SUCCESS;
5832 goto return_reset_status;
5833 }
Don Braceef8a5202017-05-04 17:51:04 -05005834
Webb Scales25163bd2015-04-23 09:32:00 -05005835 /* if controller locked up, we can guarantee command won't complete */
5836 if (lockup_detected(h)) {
Dan Carpenter2dc127b2015-06-04 17:47:56 +03005837 snprintf(msg, sizeof(msg),
5838 "cmd %d RESET FAILED, lockup detected",
5839 hpsa_get_cmd_index(scsicmd));
Webb Scales73153fe2015-04-23 09:35:04 -05005840 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
Don Bracec59d04f2017-05-04 17:51:22 -05005841 rc = FAILED;
5842 goto return_reset_status;
Webb Scales25163bd2015-04-23 09:32:00 -05005843 }
5844
5845 /* this reset request might be the result of a lockup; check */
5846 if (detect_controller_lockup(h)) {
Dan Carpenter2dc127b2015-06-04 17:47:56 +03005847 snprintf(msg, sizeof(msg),
5848 "cmd %d RESET FAILED, new lockup detected",
5849 hpsa_get_cmd_index(scsicmd));
Webb Scales73153fe2015-04-23 09:35:04 -05005850 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
Don Bracec59d04f2017-05-04 17:51:22 -05005851 rc = FAILED;
5852 goto return_reset_status;
Webb Scales25163bd2015-04-23 09:32:00 -05005853 }
5854
Webb Scalesd604f532015-04-23 09:35:22 -05005855 /* Do not attempt on controller */
Don Bracec59d04f2017-05-04 17:51:22 -05005856 if (is_hba_lunid(dev->scsi3addr)) {
5857 rc = SUCCESS;
5858 goto return_reset_status;
5859 }
Webb Scalesd604f532015-04-23 09:35:22 -05005860
Scott Teel0b9b7b62015-11-04 15:51:02 -06005861 if (is_logical_dev_addr_mode(dev->scsi3addr))
5862 reset_type = HPSA_DEVICE_RESET_MSG;
5863 else
5864 reset_type = HPSA_PHYS_TARGET_RESET;
5865
5866 sprintf(msg, "resetting %s",
5867 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5868 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
Webb Scales25163bd2015-04-23 09:32:00 -05005869
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005870 /* send a reset to the SCSI LUN which the command was sent to */
Scott Teel0b9b7b62015-11-04 15:51:02 -06005871 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
Webb Scalesd604f532015-04-23 09:35:22 -05005872 DEFAULT_REPLY_QUEUE);
Don Bracec59d04f2017-05-04 17:51:22 -05005873 if (rc == 0)
5874 rc = SUCCESS;
5875 else
5876 rc = FAILED;
5877
Scott Teel0b9b7b62015-11-04 15:51:02 -06005878 sprintf(msg, "reset %s %s",
5879 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
Don Bracec59d04f2017-05-04 17:51:22 -05005880 rc == SUCCESS ? "completed successfully" : "failed");
Webb Scalesd604f532015-04-23 09:35:22 -05005881 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
Don Bracec59d04f2017-05-04 17:51:22 -05005882
5883return_reset_status:
5884 spin_lock_irqsave(&h->reset_lock, flags);
Don Braceda03ded2015-11-04 15:50:56 -06005885 h->reset_in_progress = 0;
Don Bracec59d04f2017-05-04 17:51:22 -05005886 spin_unlock_irqrestore(&h->reset_lock, flags);
5887 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005888}
5889
5890/*
Webb Scales73153fe2015-04-23 09:35:04 -05005891 * For operations with an associated SCSI command, a command block is allocated
5892 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
5893 * block request tag as an index into a table of entries. cmd_tagged_free() is
5894 * the complement, although cmd_free() may be called instead.
5895 */
5896static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
5897 struct scsi_cmnd *scmd)
5898{
5899 int idx = hpsa_get_cmd_index(scmd);
5900 struct CommandList *c = h->cmd_pool + idx;
5901
5902 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
5903 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5904 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5905 /* The index value comes from the block layer, so if it's out of
5906 * bounds, it's probably not our bug.
5907 */
5908 BUG();
5909 }
5910
5911 atomic_inc(&c->refcount);
5912 if (unlikely(!hpsa_is_cmd_idle(c))) {
5913 /*
5914 * We expect that the SCSI layer will hand us a unique tag
5915 * value. Thus, there should never be a collision here between
5916 * two requests...because if the selected command isn't idle
5917 * then someone is going to be very disappointed.
5918 */
5919 dev_err(&h->pdev->dev,
5920 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5921 idx);
5922 if (c->scsi_cmd != NULL)
5923 scsi_print_command(c->scsi_cmd);
5924 scsi_print_command(scmd);
5925 }
5926
5927 hpsa_cmd_partial_init(h, idx, c);
5928 return c;
5929}
5930
5931static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5932{
5933 /*
5934 * Release our reference to the block. We don't need to do anything
Don Brace08ec46f2017-05-04 17:51:49 -05005935 * else to free it, because it is accessed by index.
Webb Scales73153fe2015-04-23 09:35:04 -05005936 */
5937 (void)atomic_dec(&c->refcount);
5938}
5939
5940/*
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005941 * For operations that cannot sleep, a command block is allocated at init,
5942 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5943 * which ones are free or in use. Lock must be held when calling this.
5944 * cmd_free() is the complement.
Robert Elliottbf43caf2015-04-23 09:33:38 -05005945 * This function never gives up and never returns NULL. If it hangs,
5946 * another thread must call cmd_free() to free some tags.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005947 */
Webb Scales281a7fd2015-01-23 16:43:35 -06005948
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005949static struct CommandList *cmd_alloc(struct ctlr_info *h)
5950{
5951 struct CommandList *c;
Stephen Cameron360c73b2015-04-23 09:32:32 -05005952 int refcount, i;
Webb Scales73153fe2015-04-23 09:35:04 -05005953 int offset = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005954
Robert Elliott33811022015-01-23 16:43:41 -06005955 /*
5956 * There is some *extremely* small but non-zero chance that
Stephen M. Cameron4c413122014-11-14 17:27:29 -06005957 * multiple threads could get in here, and one thread could
5958 * be scanning through the list of bits looking for a free
5959 * one, but the free ones are always behind him, and other
5960 * threads sneak in behind him and eat them before he can
5961 * get to them, so that while there is always a free one, a
5962 * very unlucky thread might be starved anyway, never able to
5963 * beat the other threads. In reality, this happens so
5964 * infrequently as to be indistinguishable from never.
Webb Scales73153fe2015-04-23 09:35:04 -05005965 *
5966 * Note that we start allocating commands before the SCSI host structure
5967 * is initialized. Since the search starts at bit zero, this
5968 * all works, since we have at least one command structure available;
5969 * however, it means that the structures with the low indexes have to be
5970 * reserved for driver-initiated requests, while requests from the block
5971 * layer will use the higher indexes.
Stephen M. Cameron4c413122014-11-14 17:27:29 -06005972 */
5973
Webb Scales281a7fd2015-01-23 16:43:35 -06005974 for (;;) {
Webb Scales73153fe2015-04-23 09:35:04 -05005975 i = find_next_zero_bit(h->cmd_pool_bits,
5976 HPSA_NRESERVED_CMDS,
5977 offset);
5978 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
Webb Scales281a7fd2015-01-23 16:43:35 -06005979 offset = 0;
5980 continue;
5981 }
5982 c = h->cmd_pool + i;
5983 refcount = atomic_inc_return(&c->refcount);
5984 if (unlikely(refcount > 1)) {
5985 cmd_free(h, c); /* already in use */
Webb Scales73153fe2015-04-23 09:35:04 -05005986 offset = (i + 1) % HPSA_NRESERVED_CMDS;
Webb Scales281a7fd2015-01-23 16:43:35 -06005987 continue;
5988 }
5989 set_bit(i & (BITS_PER_LONG - 1),
5990 h->cmd_pool_bits + (i / BITS_PER_LONG));
5991 break; /* it's ours now. */
5992 }
Stephen Cameron360c73b2015-04-23 09:32:32 -05005993 hpsa_cmd_partial_init(h, i, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005994 return c;
5995}
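
/*
 * The set_bit() arithmetic above splits a command index into a word
 * and a bit within that word.  For example, on a 64-bit build
 * (BITS_PER_LONG == 64), i = 70 sets bit 70 & 63 = 6 in word
 * 70 / 64 = 1 of cmd_pool_bits.
 */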
5996
Webb Scales73153fe2015-04-23 09:35:04 -05005997/*
5998 * This is the complementary operation to cmd_alloc(). Note, however, in some
5999 * corner cases it may also be used to free blocks allocated by
6000 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
6001 * the clear-bit is harmless.
6002 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006003static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6004{
Webb Scales281a7fd2015-01-23 16:43:35 -06006005 if (atomic_dec_and_test(&c->refcount)) {
6006 int i;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006007
Webb Scales281a7fd2015-01-23 16:43:35 -06006008 i = c - h->cmd_pool;
6009 clear_bit(i & (BITS_PER_LONG - 1),
6010 h->cmd_pool_bits + (i / BITS_PER_LONG));
6011 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006012}
6013
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006014#ifdef CONFIG_COMPAT
6015
Don Brace42a91642014-11-14 17:26:27 -06006016static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
6017 void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006018{
6019 IOCTL32_Command_struct __user *arg32 =
6020 (IOCTL32_Command_struct __user *) arg;
6021 IOCTL_Command_struct arg64;
6022 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6023 int err;
6024 u32 cp;
6025
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06006026 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006027 err = 0;
6028 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6029 sizeof(arg64.LUN_info));
6030 err |= copy_from_user(&arg64.Request, &arg32->Request,
6031 sizeof(arg64.Request));
6032 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6033 sizeof(arg64.error_info));
6034 err |= get_user(arg64.buf_size, &arg32->buf_size);
6035 err |= get_user(cp, &arg32->buf);
6036 arg64.buf = compat_ptr(cp);
6037 err |= copy_to_user(p, &arg64, sizeof(arg64));
6038
6039 if (err)
6040 return -EFAULT;
6041
Don Brace42a91642014-11-14 17:26:27 -06006042 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006043 if (err)
6044 return err;
6045 err |= copy_in_user(&arg32->error_info, &p->error_info,
6046 sizeof(arg32->error_info));
6047 if (err)
6048 return -EFAULT;
6049 return err;
6050}
6051
6052static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
Don Brace42a91642014-11-14 17:26:27 -06006053 int cmd, void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006054{
6055 BIG_IOCTL32_Command_struct __user *arg32 =
6056 (BIG_IOCTL32_Command_struct __user *) arg;
6057 BIG_IOCTL_Command_struct arg64;
6058 BIG_IOCTL_Command_struct __user *p =
6059 compat_alloc_user_space(sizeof(arg64));
6060 int err;
6061 u32 cp;
6062
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06006063 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006064 err = 0;
6065 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6066 sizeof(arg64.LUN_info));
6067 err |= copy_from_user(&arg64.Request, &arg32->Request,
6068 sizeof(arg64.Request));
6069 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6070 sizeof(arg64.error_info));
6071 err |= get_user(arg64.buf_size, &arg32->buf_size);
6072 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6073 err |= get_user(cp, &arg32->buf);
6074 arg64.buf = compat_ptr(cp);
6075 err |= copy_to_user(p, &arg64, sizeof(arg64));
6076
6077 if (err)
6078 return -EFAULT;
6079
Don Brace42a91642014-11-14 17:26:27 -06006080 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006081 if (err)
6082 return err;
6083 err |= copy_in_user(&arg32->error_info, &p->error_info,
6084 sizeof(arg32->error_info));
6085 if (err)
6086 return -EFAULT;
6087 return err;
6088}
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06006089
Don Brace42a91642014-11-14 17:26:27 -06006090static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06006091{
6092 switch (cmd) {
6093 case CCISS_GETPCIINFO:
6094 case CCISS_GETINTINFO:
6095 case CCISS_SETINTINFO:
6096 case CCISS_GETNODENAME:
6097 case CCISS_SETNODENAME:
6098 case CCISS_GETHEARTBEAT:
6099 case CCISS_GETBUSTYPES:
6100 case CCISS_GETFIRMVER:
6101 case CCISS_GETDRIVVER:
6102 case CCISS_REVALIDVOLS:
6103 case CCISS_DEREGDISK:
6104 case CCISS_REGNEWDISK:
6105 case CCISS_REGNEWD:
6106 case CCISS_RESCANDISK:
6107 case CCISS_GETLUNINFO:
6108 return hpsa_ioctl(dev, cmd, arg);
6109
6110 case CCISS_PASSTHRU32:
6111 return hpsa_ioctl32_passthru(dev, cmd, arg);
6112 case CCISS_BIG_PASSTHRU32:
6113 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6114
6115 default:
6116 return -ENOIOCTLCMD;
6117 }
6118}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006119#endif
6120
6121static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6122{
6123 struct hpsa_pci_info pciinfo;
6124
6125 if (!argp)
6126 return -EINVAL;
6127 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6128 pciinfo.bus = h->pdev->bus->number;
6129 pciinfo.dev_fn = h->pdev->devfn;
6130 pciinfo.board_id = h->board_id;
6131 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6132 return -EFAULT;
6133 return 0;
6134}
6135
6136static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6137{
6138 DriverVer_type DriverVer;
6139 unsigned char vmaj, vmin, vsubmin;
6140 int rc;
6141
6142 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6143 &vmaj, &vmin, &vsubmin);
6144 if (rc != 3) {
6145 dev_info(&h->pdev->dev, "driver version string '%s' "
6146 "unrecognized.", HPSA_DRIVER_VERSION);
6147 vmaj = 0;
6148 vmin = 0;
6149 vsubmin = 0;
6150 }
6151 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6152 if (!argp)
6153 return -EINVAL;
6154 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6155 return -EFAULT;
6156 return 0;
6157}
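
/*
 * Example of the version packing above (hypothetical version "2.0.7"):
 * (2 << 16) | (0 << 8) | 7 == 0x020007.
 */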
6158
6159static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6160{
6161 IOCTL_Command_struct iocommand;
6162 struct CommandList *c;
6163 char *buff = NULL;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006164 u64 temp64;
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06006165 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006166
6167 if (!argp)
6168 return -EINVAL;
6169 if (!capable(CAP_SYS_RAWIO))
6170 return -EPERM;
6171 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6172 return -EFAULT;
6173 if ((iocommand.buf_size < 1) &&
6174 (iocommand.Request.Type.Direction != XFER_NONE)) {
6175 return -EINVAL;
6176 }
6177 if (iocommand.buf_size > 0) {
6178 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6179 if (buff == NULL)
Robert Elliott2dd02d72015-04-23 09:33:43 -05006180 return -ENOMEM;
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05006181 if (iocommand.Request.Type.Direction & XFER_WRITE) {
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06006182 /* Copy the data into the buffer we created */
6183 if (copy_from_user(buff, iocommand.buf,
6184 iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06006185 rc = -EFAULT;
6186 goto out_kfree;
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06006187 }
6188 } else {
6189 memset(buff, 0, iocommand.buf_size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006190 }
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06006191 }
Stephen Cameron45fcb862015-01-23 16:43:04 -06006192 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05006193
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006194 /* Fill in the command type */
6195 c->cmd_type = CMD_IOCTL_PEND;
Webb Scalesa58e7e52015-04-23 09:34:16 -05006196 c->scsi_cmd = SCSI_CMD_BUSY;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006197 /* Fill in Command Header */
6198 c->Header.ReplyQueue = 0; /* unused in simple mode */
6199 if (iocommand.buf_size > 0) { /* buffer to fill */
6200 c->Header.SGList = 1;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006201 c->Header.SGTotal = cpu_to_le16(1);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006202 } else { /* no buffers to fill */
6203 c->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006204 c->Header.SGTotal = cpu_to_le16(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006205 }
6206 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006207
6208 /* Fill in Request block */
6209 memcpy(&c->Request, &iocommand.Request,
6210 sizeof(c->Request));
6211
6212 /* Fill in the scatter gather information */
6213 if (iocommand.buf_size > 0) {
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006214 temp64 = pci_map_single(h->pdev, buff,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006215 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006216 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6217 c->SG[0].Addr = cpu_to_le64(0);
6218 c->SG[0].Len = cpu_to_le32(0);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06006219 rc = -ENOMEM;
6220 goto out;
6221 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006222 c->SG[0].Addr = cpu_to_le64(temp64);
6223 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6224 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006225 }
Don Bracec448ecf2016-04-27 17:13:51 -05006226 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
Don Brace3fb134c2016-07-01 13:37:38 -05006227 NO_TIMEOUT);
Stephen M. Cameronc2dd32e2011-06-03 09:57:29 -05006228 if (iocommand.buf_size > 0)
6229 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006230 check_ioctl_unit_attention(h, c);
Webb Scales25163bd2015-04-23 09:32:00 -05006231 if (rc) {
6232 rc = -EIO;
6233 goto out;
6234 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006235
6236 /* Copy the error information out */
6237 memcpy(&iocommand.error_info, c->err_info,
6238 sizeof(iocommand.error_info));
6239 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06006240 rc = -EFAULT;
6241 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006242 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05006243 if ((iocommand.Request.Type.Direction & XFER_READ) &&
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06006244 iocommand.buf_size > 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006245 /* Copy the data out of the buffer we created */
6246 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06006247 rc = -EFAULT;
6248 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006249 }
6250 }
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06006251out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06006252 cmd_free(h, c);
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06006253out_kfree:
6254 kfree(buff);
6255 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006256}
6257
6258static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6259{
6260 BIG_IOCTL_Command_struct *ioc;
6261 struct CommandList *c;
6262 unsigned char **buff = NULL;
6263 int *buff_size = NULL;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006264 u64 temp64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006265 BYTE sg_used = 0;
6266 int status = 0;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06006267 u32 left;
6268 u32 sz;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006269 BYTE __user *data_ptr;
6270
6271 if (!argp)
6272 return -EINVAL;
6273 if (!capable(CAP_SYS_RAWIO))
6274 return -EPERM;
Javier Martinez Canillas19be606b2016-10-13 13:10:08 -03006275 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006276 if (!ioc) {
6277 status = -ENOMEM;
6278 goto cleanup1;
6279 }
6280 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6281 status = -EFAULT;
6282 goto cleanup1;
6283 }
6284 if ((ioc->buf_size < 1) &&
6285 (ioc->Request.Type.Direction != XFER_NONE)) {
6286 status = -EINVAL;
6287 goto cleanup1;
6288 }
6289 /* Check kmalloc limits using all SGs */
6290 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6291 status = -EINVAL;
6292 goto cleanup1;
6293 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06006294 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006295 status = -EINVAL;
6296 goto cleanup1;
6297 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06006298 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006299 if (!buff) {
6300 status = -ENOMEM;
6301 goto cleanup1;
6302 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06006303 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006304 if (!buff_size) {
6305 status = -ENOMEM;
6306 goto cleanup1;
6307 }
6308 left = ioc->buf_size;
6309 data_ptr = ioc->buf;
6310 while (left) {
6311 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6312 buff_size[sg_used] = sz;
6313 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6314 if (buff[sg_used] == NULL) {
6315 status = -ENOMEM;
6316 goto cleanup1;
6317 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05006318 if (ioc->Request.Type.Direction & XFER_WRITE) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006319 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
Stephen M. Cameron0758f4f2014-07-03 10:18:03 -05006320 status = -EFAULT;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006321 goto cleanup1;
6322 }
6323 } else
6324 memset(buff[sg_used], 0, sz);
6325 left -= sz;
6326 data_ptr += sz;
6327 sg_used++;
6328 }
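 /*
  * Example of the chunking above (illustrative sizes): buf_size =
  * 10000 with malloc_size = 4096 yields three buffers of 4096,
  * 4096 and 1808 bytes, so sg_used ends up as 3.
  */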
Stephen Cameron45fcb862015-01-23 16:43:04 -06006329 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05006330
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006331 c->cmd_type = CMD_IOCTL_PEND;
Webb Scalesa58e7e52015-04-23 09:34:16 -05006332 c->scsi_cmd = SCSI_CMD_BUSY;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006333 c->Header.ReplyQueue = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006334 c->Header.SGList = (u8) sg_used;
6335 c->Header.SGTotal = cpu_to_le16(sg_used);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006336 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006337 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6338 if (ioc->buf_size > 0) {
6339 int i;
6340 for (i = 0; i < sg_used; i++) {
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006341 temp64 = pci_map_single(h->pdev, buff[i],
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006342 buff_size[i], PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006343 if (dma_mapping_error(&h->pdev->dev,
6344 (dma_addr_t) temp64)) {
6345 c->SG[i].Addr = cpu_to_le64(0);
6346 c->SG[i].Len = cpu_to_le32(0);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06006347 hpsa_pci_unmap(h->pdev, c, i,
6348 PCI_DMA_BIDIRECTIONAL);
6349 status = -ENOMEM;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05006350 goto cleanup0;
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06006351 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006352 c->SG[i].Addr = cpu_to_le64(temp64);
6353 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6354 c->SG[i].Ext = cpu_to_le32(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006355 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006356 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006357 }
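 /*
  * Note on the SG[--i] above: the loop leaves i == sg_used, so with
  * sg_used = 3 (for example) the HPSA_SG_LAST marker lands on SG[2],
  * the final descriptor.
  */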
Don Bracec448ecf2016-04-27 17:13:51 -05006358 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
Don Brace3fb134c2016-07-01 13:37:38 -05006359 NO_TIMEOUT);
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06006360 if (sg_used)
6361 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006362 check_ioctl_unit_attention(h, c);
Webb Scales25163bd2015-04-23 09:32:00 -05006363 if (status) {
6364 status = -EIO;
6365 goto cleanup0;
6366 }
6367
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006368 /* Copy the error information out */
6369 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6370 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006371 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05006372 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006373 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05006374 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
Don Brace2b08b3e2015-01-23 16:41:09 -06006375 int i;
6376
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006377 /* Copy the data out of the buffer we created */
6378 BYTE __user *ptr = ioc->buf;
6379 for (i = 0; i < sg_used; i++) {
6380 if (copy_to_user(ptr, buff[i], buff_size[i])) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006381 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05006382 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006383 }
6384 ptr += buff_size[i];
6385 }
6386 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006387 status = 0;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05006388cleanup0:
Stephen Cameron45fcb862015-01-23 16:43:04 -06006389 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006390cleanup1:
6391 if (buff) {
Don Brace2b08b3e2015-01-23 16:41:09 -06006392 int i;
6393
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006394 for (i = 0; i < sg_used; i++)
6395 kfree(buff[i]);
6396 kfree(buff);
6397 }
6398 kfree(buff_size);
6399 kfree(ioc);
6400 return status;
6401}
6402
6403static void check_ioctl_unit_attention(struct ctlr_info *h,
6404 struct CommandList *c)
6405{
6406 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6407 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6408 (void) check_for_unit_attention(h, c);
6409}
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006410
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006411/*
6412 * ioctl
6413 */
Don Brace42a91642014-11-14 17:26:27 -06006414static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006415{
6416 struct ctlr_info *h;
6417 void __user *argp = (void __user *)arg;
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006418 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006419
6420 h = sdev_to_hba(dev);
6421
6422 switch (cmd) {
6423 case CCISS_DEREGDISK:
6424 case CCISS_REGNEWDISK:
6425 case CCISS_REGNEWD:
Stephen M. Camerona08a8472010-02-04 08:43:16 -06006426 hpsa_scan_start(h->scsi_host);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006427 return 0;
6428 case CCISS_GETPCIINFO:
6429 return hpsa_getpciinfo_ioctl(h, argp);
6430 case CCISS_GETDRIVVER:
6431 return hpsa_getdrivver_ioctl(h, argp);
6432 case CCISS_PASSTHRU:
Don Brace34f0c622015-01-23 16:43:46 -06006433 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006434 return -EAGAIN;
6435 rc = hpsa_passthru_ioctl(h, argp);
Don Brace34f0c622015-01-23 16:43:46 -06006436 atomic_inc(&h->passthru_cmds_avail);
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006437 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006438 case CCISS_BIG_PASSTHRU:
Don Brace34f0c622015-01-23 16:43:46 -06006439 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006440 return -EAGAIN;
6441 rc = hpsa_big_passthru_ioctl(h, argp);
Don Brace34f0c622015-01-23 16:43:46 -06006442 atomic_inc(&h->passthru_cmds_avail);
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006443 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006444 default:
6445 return -ENOTTY;
6446 }
6447}
6448
Robert Elliottbf43caf2015-04-23 09:33:38 -05006449static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006450 u8 reset_type)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006451{
6452 struct CommandList *c;
6453
6454 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05006455
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006456 /* fill_cmd can't fail here, no data buffer to map */
6457 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006458 RAID_CTLR_LUNID, TYPE_MSG);
6459 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6460 c->waiting = NULL;
6461 enqueue_cmd_and_start_io(h, c);
6462 /* Don't wait for completion, the reset won't complete. Don't free
6463 * the command either. This is the last command we will send before
6464 * re-initializing everything, so it doesn't matter and won't leak.
6465 */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006467}
6468
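/*
 * fill_cmd: build the CDB and the type/attribute/direction bits for a
 * driver-internal command (cmd_type TYPE_CMD) or a controller message
 * (TYPE_MSG), then DMA-map the single data buffer, if any.
 * Returns 0 on success, -1 if hpsa_map_one() fails.
 */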
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006469static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06006470 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006471 int cmd_type)
6472{
6473 int pci_dir = XFER_NONE;
6474
6475 c->cmd_type = CMD_IOCTL_PEND;
Webb Scalesa58e7e52015-04-23 09:34:16 -05006476 c->scsi_cmd = SCSI_CMD_BUSY;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006477 c->Header.ReplyQueue = 0;
6478 if (buff != NULL && size > 0) {
6479 c->Header.SGList = 1;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006480 c->Header.SGTotal = cpu_to_le16(1);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006481 } else {
6482 c->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006483 c->Header.SGTotal = cpu_to_le16(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006484 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006485 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6486
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006487 if (cmd_type == TYPE_CMD) {
6488 switch (cmd) {
6489 case HPSA_INQUIRY:
6490 /* are we trying to read a vital product page */
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06006491 if (page_code & VPD_PAGE) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006492 c->Request.CDB[1] = 0x01;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06006493 c->Request.CDB[2] = (page_code & 0xff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006494 }
6495 c->Request.CDBLen = 6;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006496 c->Request.type_attr_dir =
6497 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006498 c->Request.Timeout = 0;
6499 c->Request.CDB[0] = HPSA_INQUIRY;
6500 c->Request.CDB[4] = size & 0xFF;
6501 break;
6502 case HPSA_REPORT_LOG:
6503 case HPSA_REPORT_PHYS:
 6504 /* Talking to the controller, so it's a physical command:
 6505 mode = 00, target = 0. Nothing to write.
 6506 */
6507 c->Request.CDBLen = 12;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006508 c->Request.type_attr_dir =
6509 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006510 c->Request.Timeout = 0;
6511 c->Request.CDB[0] = cmd;
6512 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6513 c->Request.CDB[7] = (size >> 16) & 0xFF;
6514 c->Request.CDB[8] = (size >> 8) & 0xFF;
6515 c->Request.CDB[9] = size & 0xFF;
6516 break;
Scott Teelc2adae42015-11-04 15:52:16 -06006517 case BMIC_SENSE_DIAG_OPTIONS:
6518 c->Request.CDBLen = 16;
6519 c->Request.type_attr_dir =
6520 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6521 c->Request.Timeout = 0;
6522 /* Spec says this should be BMIC_WRITE */
6523 c->Request.CDB[0] = BMIC_READ;
6524 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6525 break;
6526 case BMIC_SET_DIAG_OPTIONS:
6527 c->Request.CDBLen = 16;
6528 c->Request.type_attr_dir =
6529 TYPE_ATTR_DIR(cmd_type,
6530 ATTR_SIMPLE, XFER_WRITE);
6531 c->Request.Timeout = 0;
6532 c->Request.CDB[0] = BMIC_WRITE;
6533 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6534 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006535 case HPSA_CACHE_FLUSH:
6536 c->Request.CDBLen = 12;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006537 c->Request.type_attr_dir =
6538 TYPE_ATTR_DIR(cmd_type,
6539 ATTR_SIMPLE, XFER_WRITE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006540 c->Request.Timeout = 0;
6541 c->Request.CDB[0] = BMIC_WRITE;
6542 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
Stephen M. Cameronbb158ea2011-10-26 16:21:17 -05006543 c->Request.CDB[7] = (size >> 8) & 0xFF;
6544 c->Request.CDB[8] = size & 0xFF;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006545 break;
6546 case TEST_UNIT_READY:
6547 c->Request.CDBLen = 6;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006548 c->Request.type_attr_dir =
6549 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006550 c->Request.Timeout = 0;
6551 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006552 case HPSA_GET_RAID_MAP:
6553 c->Request.CDBLen = 12;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006554 c->Request.type_attr_dir =
6555 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006556 c->Request.Timeout = 0;
6557 c->Request.CDB[0] = HPSA_CISS_READ;
6558 c->Request.CDB[1] = cmd;
6559 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6560 c->Request.CDB[7] = (size >> 16) & 0xFF;
6561 c->Request.CDB[8] = (size >> 8) & 0xFF;
6562 c->Request.CDB[9] = size & 0xFF;
6563 break;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06006564 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6565 c->Request.CDBLen = 10;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006566 c->Request.type_attr_dir =
6567 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameron316b2212014-02-21 16:25:15 -06006568 c->Request.Timeout = 0;
6569 c->Request.CDB[0] = BMIC_READ;
6570 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6571 c->Request.CDB[7] = (size >> 16) & 0xFF;
6572 c->Request.CDB[8] = (size >> 8) & 0xFF;
6573 break;
Don Brace03383732015-01-23 16:43:30 -06006574 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6575 c->Request.CDBLen = 10;
6576 c->Request.type_attr_dir =
6577 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6578 c->Request.Timeout = 0;
6579 c->Request.CDB[0] = BMIC_READ;
6580 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6581 c->Request.CDB[7] = (size >> 16) & 0xFF;
 6582 c->Request.CDB[8] = (size >> 8) & 0xFF;
6583 break;
Kevin Barnettd04e62b2015-11-04 15:52:34 -06006584 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6585 c->Request.CDBLen = 10;
6586 c->Request.type_attr_dir =
6587 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6588 c->Request.Timeout = 0;
6589 c->Request.CDB[0] = BMIC_READ;
6590 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6591 c->Request.CDB[7] = (size >> 16) & 0xFF;
 6592 c->Request.CDB[8] = (size >> 8) & 0xFF;
6593 break;
Don Bracecca8f132015-12-22 10:36:48 -06006594 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6595 c->Request.CDBLen = 10;
6596 c->Request.type_attr_dir =
6597 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6598 c->Request.Timeout = 0;
6599 c->Request.CDB[0] = BMIC_READ;
6600 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6601 c->Request.CDB[7] = (size >> 16) & 0xFF;
 6602 c->Request.CDB[8] = (size >> 8) & 0xFF;
6603 break;
Scott Teel66749d02015-11-04 15:51:57 -06006604 case BMIC_IDENTIFY_CONTROLLER:
6605 c->Request.CDBLen = 10;
6606 c->Request.type_attr_dir =
6607 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6608 c->Request.Timeout = 0;
6609 c->Request.CDB[0] = BMIC_READ;
6610 c->Request.CDB[1] = 0;
6611 c->Request.CDB[2] = 0;
6612 c->Request.CDB[3] = 0;
6613 c->Request.CDB[4] = 0;
6614 c->Request.CDB[5] = 0;
6615 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6616 c->Request.CDB[7] = (size >> 16) & 0xFF;
 6617 c->Request.CDB[8] = (size >> 8) & 0xFF;
6618 c->Request.CDB[9] = 0;
6619 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006620 default:
6621 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
6622 BUG();
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006623 }
6624 } else if (cmd_type == TYPE_MSG) {
6625 switch (cmd) {
6626
Scott Teel0b9b7b62015-11-04 15:51:02 -06006627 case HPSA_PHYS_TARGET_RESET:
6628 c->Request.CDBLen = 16;
6629 c->Request.type_attr_dir =
6630 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6631 c->Request.Timeout = 0; /* Don't time out */
6632 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6633 c->Request.CDB[0] = HPSA_RESET;
6634 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6635 /* Physical target reset needs no control bytes 4-7*/
6636 c->Request.CDB[4] = 0x00;
6637 c->Request.CDB[5] = 0x00;
6638 c->Request.CDB[6] = 0x00;
6639 c->Request.CDB[7] = 0x00;
6640 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006641 case HPSA_DEVICE_RESET_MSG:
6642 c->Request.CDBLen = 16;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006643 c->Request.type_attr_dir =
6644 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006645 c->Request.Timeout = 0; /* Don't time out */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006646 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6647 c->Request.CDB[0] = cmd;
Stephen M. Cameron21e89af2012-07-26 11:34:10 -05006648 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006649 /* If bytes 4-7 are zero, it means reset
 6650 * the device at this LunID */
6651 c->Request.CDB[4] = 0x00;
6652 c->Request.CDB[5] = 0x00;
6653 c->Request.CDB[6] = 0x00;
6654 c->Request.CDB[7] = 0x00;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006655 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006656 default:
6657 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6658 cmd);
6659 BUG();
6660 }
6661 } else {
6662 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6663 BUG();
6664 }
6665
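	/*
	 * Recover the transfer direction that TYPE_ATTR_DIR() packed into
	 * type_attr_dir above and pick the matching PCI DMA direction for
	 * mapping the data buffer.
	 */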
Stephen M. Camerona505b862014-11-14 17:27:04 -06006666 switch (GET_DIR(c->Request.type_attr_dir)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006667 case XFER_READ:
6668 pci_dir = PCI_DMA_FROMDEVICE;
6669 break;
6670 case XFER_WRITE:
6671 pci_dir = PCI_DMA_TODEVICE;
6672 break;
6673 case XFER_NONE:
6674 pci_dir = PCI_DMA_NONE;
6675 break;
6676 default:
6677 pci_dir = PCI_DMA_BIDIRECTIONAL;
6678 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006679 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6680 return -1;
6681 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006682}
6683
6684/*
6685 * Map (physical) PCI mem into (virtual) kernel space
6686 */
6687static void __iomem *remap_pci_mem(ulong base, ulong size)
6688{
6689 ulong page_base = ((ulong) base) & PAGE_MASK;
6690 ulong page_offs = ((ulong) base) - page_base;
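	/* ioremap from the page boundary, then re-apply the offset into
	 * the returned mapping. */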
Stephen M. Cameron088ba34c2012-07-26 11:34:23 -05006691 void __iomem *page_remapped = ioremap_nocache(page_base,
6692 page_offs + size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006693
6694 return page_remapped ? (page_remapped + page_offs) : NULL;
6695}
6696
Matt Gates254f7962012-05-01 11:43:06 -05006697static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006698{
Matt Gates254f7962012-05-01 11:43:06 -05006699 return h->access.command_completed(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006700}
6701
Stephen M. Cameron900c5442010-02-04 08:42:35 -06006702static inline bool interrupt_pending(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006703{
6704 return h->access.intr_pending(h);
6705}
6706
6707static inline long interrupt_not_for_us(struct ctlr_info *h)
6708{
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006709 return (h->access.intr_pending(h) == 0) ||
6710 (h->interrupts_enabled == 0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006711}
6712
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06006713static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6714 u32 raw_tag)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006715{
6716 if (unlikely(tag_index >= h->nr_cmds)) {
6717 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6718 return 1;
6719 }
6720 return 0;
6721}
6722
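/*
 * Route a completed command: SCSI and ioaccel commands go back through
 * the midlayer via complete_scsi_command(); driver-internal commands
 * (ioctl passthru, ioaccel2 TMF) wake the thread sleeping on c->waiting.
 */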
Stephen M. Cameron5a3d16f2012-05-01 11:42:46 -05006723static inline void finish_cmd(struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006724{
Stephen M. Camerone85c5972012-05-01 11:43:42 -05006725 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
Scott Teelc3497752014-02-18 13:56:34 -06006726 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6727 || c->cmd_type == CMD_IOACCEL2))
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05006728 complete_scsi_command(c);
Stephen Cameron8be986c2015-04-23 09:34:06 -05006729 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006730 complete(c->waiting);
Stephen M. Camerona104c992010-02-04 08:42:24 -06006731}
6732
Don Brace303932f2010-02-04 08:42:40 -06006733/* process completion of an indexed ("direct lookup") command */
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05006734static inline void process_indexed_cmd(struct ctlr_info *h,
Don Brace303932f2010-02-04 08:42:40 -06006735 u32 raw_tag)
6736{
6737 u32 tag_index;
6738 struct CommandList *c;
6739
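	/*
	 * The tag is the command-pool index shifted up by
	 * DIRECT_LOOKUP_SHIFT; the low-order bits are not part of the
	 * index (the controller uses them for status/error indications),
	 * so shifting recovers the index into h->cmd_pool.
	 */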
Don Bracef2405db2015-01-23 16:43:09 -06006740 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05006741 if (!bad_tag(h, tag_index, raw_tag)) {
6742 c = h->cmd_pool + tag_index;
6743 finish_cmd(c);
6744 }
Don Brace303932f2010-02-04 08:42:40 -06006745}
6746
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006747/* Some controllers, like the P400, will give us one interrupt
6748 * after a soft reset, even if we turned interrupts off.
6749 * Only need to check for this in the hpsa_xxx_discard_completions
6750 * functions.
6751 */
6752static int ignore_bogus_interrupt(struct ctlr_info *h)
6753{
6754 if (likely(!reset_devices))
6755 return 0;
6756
6757 if (likely(h->interrupts_enabled))
6758 return 0;
6759
6760 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6761 "(known firmware bug.) Ignoring.\n");
6762
6763 return 1;
6764}
6765
Matt Gates254f7962012-05-01 11:43:06 -05006766/*
6767 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 6768 * Relies on (h->q[x] == x) being true for x such that
6769 * 0 <= x < MAX_REPLY_QUEUES.
6770 */
6771static struct ctlr_info *queue_to_hba(u8 *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006772{
Matt Gates254f7962012-05-01 11:43:06 -05006773 return container_of((queue - *queue), struct ctlr_info, q[0]);
6774}
6775
6776static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6777{
6778 struct ctlr_info *h = queue_to_hba(queue);
6779 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006780 u32 raw_tag;
6781
6782 if (ignore_bogus_interrupt(h))
6783 return IRQ_NONE;
6784
6785 if (interrupt_not_for_us(h))
6786 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006787 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006788 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05006789 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006790 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05006791 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006792 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006793 return IRQ_HANDLED;
6794}
6795
Matt Gates254f7962012-05-01 11:43:06 -05006796static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006797{
Matt Gates254f7962012-05-01 11:43:06 -05006798 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006799 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05006800 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006801
6802 if (ignore_bogus_interrupt(h))
6803 return IRQ_NONE;
6804
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006805 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05006806 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006807 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05006808 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006809 return IRQ_HANDLED;
6810}
6811
Matt Gates254f7962012-05-01 11:43:06 -05006812static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006813{
Matt Gates254f7962012-05-01 11:43:06 -05006814 struct ctlr_info *h = queue_to_hba((u8 *) queue);
Don Brace303932f2010-02-04 08:42:40 -06006815 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05006816 u8 q = *(u8 *) queue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006817
6818 if (interrupt_not_for_us(h))
6819 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006820 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006821 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05006822 raw_tag = get_next_completion(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006823 while (raw_tag != FIFO_EMPTY) {
Don Bracef2405db2015-01-23 16:43:09 -06006824 process_indexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05006825 raw_tag = next_command(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006826 }
6827 }
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006828 return IRQ_HANDLED;
6829}
6830
Matt Gates254f7962012-05-01 11:43:06 -05006831static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006832{
Matt Gates254f7962012-05-01 11:43:06 -05006833 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006834 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05006835 u8 q = *(u8 *) queue;
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006836
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006837 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05006838 raw_tag = get_next_completion(h, q);
Don Brace303932f2010-02-04 08:42:40 -06006839 while (raw_tag != FIFO_EMPTY) {
Don Bracef2405db2015-01-23 16:43:09 -06006840 process_indexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05006841 raw_tag = next_command(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006842 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006843 return IRQ_HANDLED;
6844}
6845
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006846/* Send a message CDB to the firmware. Careful, this only works
6847 * in simple mode, not performant mode due to the tag lookup.
6848 * We only ever use this immediately after a controller reset.
6849 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006850static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6851 unsigned char type)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006852{
6853 struct Command {
6854 struct CommandListHeader CommandHeader;
6855 struct RequestBlock Request;
6856 struct ErrDescriptor ErrorDescriptor;
6857 };
6858 struct Command *cmd;
6859 static const size_t cmd_sz = sizeof(*cmd) +
6860 sizeof(cmd->ErrorDescriptor);
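	/* Extra room is reserved directly after the command itself;
	 * ErrorDescriptor.Addr is pointed there below so the controller
	 * has somewhere to post error information. */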
6861 dma_addr_t paddr64;
Don Brace2b08b3e2015-01-23 16:41:09 -06006862 __le32 paddr32;
6863 u32 tag;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006864 void __iomem *vaddr;
6865 int i, err;
6866
6867 vaddr = pci_ioremap_bar(pdev, 0);
6868 if (vaddr == NULL)
6869 return -ENOMEM;
6870
6871 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6872 * CCISS commands, so they must be allocated from the lower 4GiB of
6873 * memory.
6874 */
6875 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6876 if (err) {
6877 iounmap(vaddr);
Robert Elliott1eaec8f2015-01-23 16:42:37 -06006878 return err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006879 }
6880
6881 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6882 if (cmd == NULL) {
6883 iounmap(vaddr);
6884 return -ENOMEM;
6885 }
6886
6887 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6888 * although there's no guarantee, we assume that the address is at
6889 * least 4-byte aligned (most likely, it's page-aligned).
6890 */
Don Brace2b08b3e2015-01-23 16:41:09 -06006891 paddr32 = cpu_to_le32(paddr64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006892
6893 cmd->CommandHeader.ReplyQueue = 0;
6894 cmd->CommandHeader.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006895 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
Don Brace2b08b3e2015-01-23 16:41:09 -06006896 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006897 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6898
6899 cmd->Request.CDBLen = 16;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006900 cmd->Request.type_attr_dir =
6901 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006902 cmd->Request.Timeout = 0; /* Don't time out */
6903 cmd->Request.CDB[0] = opcode;
6904 cmd->Request.CDB[1] = type;
6905 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006906 cmd->ErrorDescriptor.Addr =
Don Brace2b08b3e2015-01-23 16:41:09 -06006907 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006908 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006909
Don Brace2b08b3e2015-01-23 16:41:09 -06006910 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006911
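	/* Poll the reply FIFO for our tag.  The command's physical address
	 * doubles as its tag, so completion is detected by matching the
	 * posted reply (minus the error/status bits) against paddr64. */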
6912 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6913 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
Don Brace2b08b3e2015-01-23 16:41:09 -06006914 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006915 break;
6916 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6917 }
6918
6919 iounmap(vaddr);
6920
6921 /* we leak the DMA buffer here ... no choice since the controller could
6922 * still complete the command.
6923 */
6924 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6925 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6926 opcode, type);
6927 return -ETIMEDOUT;
6928 }
6929
6930 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6931
6932 if (tag & HPSA_ERROR_BIT) {
6933 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6934 opcode, type);
6935 return -EIO;
6936 }
6937
6938 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6939 opcode, type);
6940 return 0;
6941}
6942
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006943#define hpsa_noop(p) hpsa_message(p, 3, 0)
6944
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006945static int hpsa_controller_hard_reset(struct pci_dev *pdev,
Don Brace42a91642014-11-14 17:26:27 -06006946 void __iomem *vaddr, u32 use_doorbell)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006947{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006948
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006949 if (use_doorbell) {
6950 /* For everything after the P600, the PCI power state method
6951 * of resetting the controller doesn't work, so we have this
6952 * other way using the doorbell register.
6953 */
6954 dev_info(&pdev->dev, "using doorbell to reset controller\n");
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006955 writel(use_doorbell, vaddr + SA5_DOORBELL);
Stephen M. Cameron85009232013-09-23 13:33:36 -05006956
Justin Lindley00701a92014-05-29 10:52:47 -05006957 /* PMC hardware guys tell us we need a 10 second delay after
Stephen M. Cameron85009232013-09-23 13:33:36 -05006958 * doorbell reset and before any attempt to talk to the board
6959 * at all to ensure that this actually works and doesn't fall
6960 * over in some weird corner cases.
6961 */
Justin Lindley00701a92014-05-29 10:52:47 -05006962 msleep(10000);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006963 } else { /* Try to do it the PCI power state way */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006964
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006965 /* Quoting from the Open CISS Specification: "The Power
6966 * Management Control/Status Register (CSR) controls the power
6967 * state of the device. The normal operating state is D0,
6968 * CSR=00h. The software off state is D3, CSR=03h. To reset
6969 * the controller, place the interface device in D3 then to D0,
6970 * this causes a secondary PCI reset which will reset the
6971 * controller." */
6972
Don Brace2662cab2015-01-23 16:41:25 -06006973 int rc = 0;
6974
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006975 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
Don Brace2662cab2015-01-23 16:41:25 -06006976
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006977 /* enter the D3hot power management state */
Don Brace2662cab2015-01-23 16:41:25 -06006978 rc = pci_set_power_state(pdev, PCI_D3hot);
6979 if (rc)
6980 return rc;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006981
6982 msleep(500);
6983
6984 /* enter the D0 power management state */
Don Brace2662cab2015-01-23 16:41:25 -06006985 rc = pci_set_power_state(pdev, PCI_D0);
6986 if (rc)
6987 return rc;
Mike Millerc4853ef2011-10-21 08:19:43 +02006988
6989 /*
6990 * The P600 requires a small delay when changing states.
6991 * Otherwise we may think the board did not reset and we bail.
6992 * This for kdump only and is particular to the P600.
6993 */
6994 msleep(500);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006995 }
6996 return 0;
6997}
6998
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006999static void init_driver_version(char *driver_version, int len)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007000{
7001 memset(driver_version, 0, len);
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06007002 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007003}
7004
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007005static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007006{
7007 char *driver_version;
7008 int i, size = sizeof(cfgtable->driver_version);
7009
7010 driver_version = kmalloc(size, GFP_KERNEL);
7011 if (!driver_version)
7012 return -ENOMEM;
7013
7014 init_driver_version(driver_version, size);
7015 for (i = 0; i < size; i++)
7016 writeb(driver_version[i], &cfgtable->driver_version[i]);
7017 kfree(driver_version);
7018 return 0;
7019}
7020
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007021static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7022 unsigned char *driver_ver)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007023{
7024 int i;
7025
7026 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7027 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7028}
7029
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007030static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007031{
7032
7033 char *driver_ver, *old_driver_ver;
7034 int rc, size = sizeof(cfgtable->driver_version);
7035
7036 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
7037 if (!old_driver_ver)
7038 return -ENOMEM;
7039 driver_ver = old_driver_ver + size;
7040
7041 /* After a reset, the 32 bytes of "driver version" in the cfgtable
7042 * should have been changed, otherwise we know the reset failed.
7043 */
7044 init_driver_version(old_driver_ver, size);
7045 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
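	/* Unchanged bytes mean the board never cleared the cfgtable,
	 * i.e. the reset did not actually happen. */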
7046 rc = !memcmp(driver_ver, old_driver_ver, size);
7047 kfree(old_driver_ver);
7048 return rc;
7049}
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007050/* This does a hard reset of the controller using PCI power management
 7051 * states or using the doorbell register.
7052 */
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007053static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007054{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007055 u64 cfg_offset;
7056 u32 cfg_base_addr;
7057 u64 cfg_base_addr_index;
7058 void __iomem *vaddr;
7059 unsigned long paddr;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007060 u32 misc_fw_support;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06007061 int rc;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007062 struct CfgTable __iomem *cfgtable;
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05007063 u32 use_doorbell;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06007064 u16 command_register;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007065
7066 /* For controllers as old as the P600, this is very nearly
7067 * the same thing as
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007068 *
7069 * pci_save_state(pci_dev);
7070 * pci_set_power_state(pci_dev, PCI_D3hot);
7071 * pci_set_power_state(pci_dev, PCI_D0);
7072 * pci_restore_state(pci_dev);
7073 *
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007074 * For controllers newer than the P600, the pci power state
7075 * method of resetting doesn't work so we have another way
7076 * using the doorbell register.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007077 */
Stephen M. Cameron18867652010-06-16 13:51:45 -05007078
Robert Elliott60f923b2015-01-23 16:42:06 -06007079 if (!ctlr_is_resettable(board_id)) {
7080 dev_warn(&pdev->dev, "Controller not resettable\n");
Stephen M. Cameron25c1e56a2011-01-06 14:48:18 -06007081 return -ENODEV;
7082 }
Stephen M. Cameron46380782011-05-03 15:00:01 -05007083
7084 /* if controller is soft- but not hard resettable... */
7085 if (!ctlr_is_hard_resettable(board_id))
7086 return -ENOTSUPP; /* try soft reset later. */
Stephen M. Cameron18867652010-06-16 13:51:45 -05007087
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06007088 /* Save the PCI command register */
7089 pci_read_config_word(pdev, 4, &command_register);
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06007090 pci_save_state(pdev);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007091
7092 /* find the first memory BAR, so we can find the cfg table */
7093 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7094 if (rc)
7095 return rc;
7096 vaddr = remap_pci_mem(paddr, 0x250);
7097 if (!vaddr)
7098 return -ENOMEM;
7099
7100 /* find cfgtable in order to check if reset via doorbell is supported */
7101 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7102 &cfg_base_addr_index, &cfg_offset);
7103 if (rc)
7104 goto unmap_vaddr;
7105 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7106 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7107 if (!cfgtable) {
7108 rc = -ENOMEM;
7109 goto unmap_vaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007110 }
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007111 rc = write_driver_ver_to_cfgtable(cfgtable);
7112 if (rc)
Tomas Henzl03741d92015-01-23 16:41:14 -06007113 goto unmap_cfgtable;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007114
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05007115 /* If reset via doorbell register is supported, use that.
7116 * There are two such methods. Favor the newest method.
7117 */
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007118 misc_fw_support = readl(&cfgtable->misc_fw_support);
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05007119 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7120 if (use_doorbell) {
7121 use_doorbell = DOORBELL_CTLR_RESET2;
7122 } else {
7123 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7124 if (use_doorbell) {
Stephen Cameron050f7142015-01-23 16:42:22 -06007125 dev_warn(&pdev->dev,
7126 "Soft reset not supported. Firmware update is required.\n");
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007127 rc = -ENOTSUPP; /* try soft reset */
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05007128 goto unmap_cfgtable;
7129 }
7130 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007131
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007132 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7133 if (rc)
7134 goto unmap_cfgtable;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007135
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06007136 pci_restore_state(pdev);
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06007137 pci_write_config_word(pdev, 4, command_register);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007138
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007139 /* Some devices (notably the HP Smart Array 5i Controller)
 7140 * need a little pause here */
7141 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7142
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007143 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7144 if (rc) {
7145 dev_warn(&pdev->dev,
Stephen Cameron050f7142015-01-23 16:42:22 -06007146 "Failed waiting for board to become ready after hard reset\n");
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007147 goto unmap_cfgtable;
7148 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007149
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007150 rc = controller_reset_failed(vaddr);
7151 if (rc < 0)
7152 goto unmap_cfgtable;
7153 if (rc) {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007154 dev_warn(&pdev->dev, "Unable to successfully reset "
7155 "controller. Will try soft reset.\n");
7156 rc = -ENOTSUPP;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007157 } else {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007158 dev_info(&pdev->dev, "board ready after hard reset.\n");
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007159 }
7160
7161unmap_cfgtable:
7162 iounmap(cfgtable);
7163
7164unmap_vaddr:
7165 iounmap(vaddr);
7166 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007167}
7168
7169/*
 7170 * We cannot read the structure directly; for portability we must use
 7171 * the I/O accessor functions.
7172 * This is for debug only.
7173 */
Don Brace42a91642014-11-14 17:26:27 -06007174static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007175{
Stephen M. Cameron58f86652010-05-27 15:13:58 -05007176#ifdef HPSA_DEBUG
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007177 int i;
7178 char temp_name[17];
7179
7180 dev_info(dev, "Controller Configuration information\n");
7181 dev_info(dev, "------------------------------------\n");
7182 for (i = 0; i < 4; i++)
7183 temp_name[i] = readb(&(tb->Signature[i]));
7184 temp_name[4] = '\0';
7185 dev_info(dev, " Signature = %s\n", temp_name);
7186 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7187 dev_info(dev, " Transport methods supported = 0x%x\n",
7188 readl(&(tb->TransportSupport)));
7189 dev_info(dev, " Transport methods active = 0x%x\n",
7190 readl(&(tb->TransportActive)));
7191 dev_info(dev, " Requested transport Method = 0x%x\n",
7192 readl(&(tb->HostWrite.TransportRequest)));
7193 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7194 readl(&(tb->HostWrite.CoalIntDelay)));
7195 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7196 readl(&(tb->HostWrite.CoalIntCount)));
Robert Elliott69d6e332015-01-23 16:41:56 -06007197 dev_info(dev, " Max outstanding commands = %d\n",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007198 readl(&(tb->CmdsOutMax)));
7199 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7200 for (i = 0; i < 16; i++)
7201 temp_name[i] = readb(&(tb->ServerName[i]));
7202 temp_name[16] = '\0';
7203 dev_info(dev, " Server Name = %s\n", temp_name);
7204 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7205 readl(&(tb->HeartBeat)));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007206#endif /* HPSA_DEBUG */
Stephen M. Cameron58f86652010-05-27 15:13:58 -05007207}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007208
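/*
 * Walk the BARs in PCI config space to find the resource index whose
 * config-space offset matches pci_bar_addr.  I/O and 32-bit memory BARs
 * take 4 bytes of config space each; 64-bit memory BARs take 8.
 * Returns the matching resource index, or -1 if none is found.
 */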
7209static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7210{
7211 int i, offset, mem_type, bar_type;
7212
7213 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7214 return 0;
7215 offset = 0;
7216 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7217 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7218 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7219 offset += 4;
7220 else {
7221 mem_type = pci_resource_flags(pdev, i) &
7222 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7223 switch (mem_type) {
7224 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7225 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7226 offset += 4; /* 32 bit */
7227 break;
7228 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7229 offset += 8;
7230 break;
7231 default: /* reserved in PCI 2.2 */
7232 dev_warn(&pdev->dev,
7233 "base address is invalid\n");
7234 return -1;
7236 }
7237 }
7238 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7239 return i + 1;
7240 }
7241 return -1;
7242}
7243
Robert Elliottcc64c812015-04-23 09:33:12 -05007244static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7245{
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007246 pci_free_irq_vectors(h->pdev);
7247 h->msix_vectors = 0;
Robert Elliottcc64c812015-04-23 09:33:12 -05007248}
7249
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007250/* If MSI/MSI-X is supported by the kernel we will try to enable it on
Stephen Cameron050f7142015-01-23 16:42:22 -06007251 * controllers that are capable. If not, we use legacy INTx mode.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007252 */
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007253static int hpsa_interrupt_mode(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007254{
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007255 unsigned int flags = PCI_IRQ_LEGACY;
7256 int ret;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007257
7258 /* Some boards advertise MSI but don't really support it */
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007259 switch (h->board_id) {
7260 case 0x40700E11:
7261 case 0x40800E11:
7262 case 0x40820E11:
7263 case 0x40830E11:
7264 break;
7265 default:
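		/*
		 * Try MSI-X first, with one vector per reply queue and
		 * affinity spread across CPUs; if that fails, fall
		 * through to a single MSI (or legacy INTx) vector below.
		 */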
7266 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7267 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7268 if (ret > 0) {
7269 h->msix_vectors = ret;
7270 return 0;
Hannes Reineckeeee0f032014-01-15 13:30:53 +01007271 }
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007272
7273 flags |= PCI_IRQ_MSI;
7274 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007275 }
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007276
7277 ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7278 if (ret < 0)
7279 return ret;
7280 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007281}
7282
Hannes Reinecke135ae6e2017-08-15 08:58:04 +02007283static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7284 bool *legacy_board)
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007285{
7286 int i;
7287 u32 subsystem_vendor_id, subsystem_device_id;
7288
7289 subsystem_vendor_id = pdev->subsystem_vendor;
7290 subsystem_device_id = pdev->subsystem_device;
7291 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7292 subsystem_vendor_id;
7293
Hannes Reinecke135ae6e2017-08-15 08:58:04 +02007294 if (legacy_board)
7295 *legacy_board = false;
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007296 for (i = 0; i < ARRAY_SIZE(products); i++)
Hannes Reinecke135ae6e2017-08-15 08:58:04 +02007297 if (*board_id == products[i].board_id) {
7298 if (products[i].access != &SA5A_access &&
7299 products[i].access != &SA5B_access)
7300 return i;
7301 if (hpsa_allow_any) {
7302 dev_warn(&pdev->dev,
7303 "legacy board ID: 0x%08x\n",
7304 *board_id);
7305 if (legacy_board)
7306 *legacy_board = true;
7307 return i;
7308 }
7309 }
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007310
Stephen M. Cameron6798cc02010-06-16 13:51:20 -05007311 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7312 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7313 !hpsa_allow_any) {
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007314 dev_warn(&pdev->dev, "unrecognized board ID: "
7315 "0x%08x, ignoring.\n", *board_id);
7316 return -ENODEV;
7317 }
Hannes Reinecke135ae6e2017-08-15 08:58:04 +02007318 if (legacy_board)
7319 *legacy_board = true;
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007320 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7321}
7322
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007323static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7324 unsigned long *memory_bar)
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05007325{
7326 int i;
7327
7328 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05007329 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05007330 /* addressing mode bits already removed */
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05007331 *memory_bar = pci_resource_start(pdev, i);
7332 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05007333 *memory_bar);
7334 return 0;
7335 }
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05007336 dev_warn(&pdev->dev, "no memory BAR found\n");
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05007337 return -ENODEV;
7338}
7339
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007340static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7341 int wait_for_ready)
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007342{
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007343 int i, iterations;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007344 u32 scratchpad;
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007345 if (wait_for_ready)
7346 iterations = HPSA_BOARD_READY_ITERATIONS;
7347 else
7348 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007349
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007350 for (i = 0; i < iterations; i++) {
7351 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7352 if (wait_for_ready) {
7353 if (scratchpad == HPSA_FIRMWARE_READY)
7354 return 0;
7355 } else {
7356 if (scratchpad != HPSA_FIRMWARE_READY)
7357 return 0;
7358 }
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007359 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7360 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007361 dev_warn(&pdev->dev, "board not ready, timed out.\n");
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007362 return -ENODEV;
7363}
7364
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007365static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7366 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7367 u64 *cfg_offset)
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007368{
7369 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7370 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7371 *cfg_base_addr &= (u32) 0x0000ffff;
7372 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7373 if (*cfg_base_addr_index == -1) {
7374 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7375 return -ENODEV;
7376 }
7377 return 0;
7378}
7379
Robert Elliott195f2c62015-04-23 09:33:17 -05007380static void hpsa_free_cfgtables(struct ctlr_info *h)
7381{
Robert Elliott105a3db2015-04-23 09:33:48 -05007382 if (h->transtable) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007383 iounmap(h->transtable);
Robert Elliott105a3db2015-04-23 09:33:48 -05007384 h->transtable = NULL;
7385 }
7386 if (h->cfgtable) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007387 iounmap(h->cfgtable);
Robert Elliott105a3db2015-04-23 09:33:48 -05007388 h->cfgtable = NULL;
7389 }
Robert Elliott195f2c62015-04-23 09:33:17 -05007390}
7391
 7392/* Find and map CISS config table and transfer table;
 7393 * several items must be unmapped (freed) later.
 7394 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007395static int hpsa_find_cfgtables(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007396{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06007397 u64 cfg_offset;
7398 u32 cfg_base_addr;
7399 u64 cfg_base_addr_index;
Don Brace303932f2010-02-04 08:42:40 -06007400 u32 trans_offset;
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007401 int rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007402
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007403 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7404 &cfg_base_addr_index, &cfg_offset);
7405 if (rc)
7406 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007407 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007408 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
Robert Elliottcd3c81c2015-01-23 16:42:27 -06007409 if (!h->cfgtable) {
7410 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007411 return -ENOMEM;
Robert Elliottcd3c81c2015-01-23 16:42:27 -06007412 }
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007413 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7414 if (rc)
7415 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007416 /* Find performant mode table. */
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007417 trans_offset = readl(&h->cfgtable->TransMethodOffset);
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007418 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7419 cfg_base_addr_index)+cfg_offset+trans_offset,
7420 sizeof(*h->transtable));
Robert Elliott195f2c62015-04-23 09:33:17 -05007421 if (!h->transtable) {
7422 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7423 hpsa_free_cfgtables(h);
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007424 return -ENOMEM;
Robert Elliott195f2c62015-04-23 09:33:17 -05007425 }
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007426 return 0;
7427}
7428
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007429static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05007430{
Stephen Cameron41ce4c32015-04-23 09:31:47 -05007431#define MIN_MAX_COMMANDS 16
7432 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7433
7434 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
Stephen M. Cameron72ceeae2011-01-06 14:48:13 -06007435
7436 /* Limit commands in memory limited kdump scenario. */
7437 if (reset_devices && h->max_commands > 32)
7438 h->max_commands = 32;
7439
Stephen Cameron41ce4c32015-04-23 09:31:47 -05007440 if (h->max_commands < MIN_MAX_COMMANDS) {
7441 dev_warn(&h->pdev->dev,
7442 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7443 h->max_commands,
7444 MIN_MAX_COMMANDS);
7445 h->max_commands = MIN_MAX_COMMANDS;
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05007446 }
7447}
7448
Webb Scalesc7ee65b2015-01-23 16:42:17 -06007449/* If the controller reports that the total max sg entries is greater than 512,
7450 * then we know that chained SG blocks work. (Original smart arrays did not
7451 * support chained SG blocks and would return zero for max sg entries.)
7452 */
7453static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7454{
7455 return h->maxsgentries > 512;
7456}
7457
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007458/* Interrogate the hardware for some limits:
7459 * max commands, max SG elements without chaining, and with chaining,
7460 * SG chain block size, etc.
7461 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007462static void hpsa_find_board_params(struct ctlr_info *h)
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007463{
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05007464 hpsa_get_max_perf_mode_cmds(h);
Stephen Cameron45fcb862015-01-23 16:43:04 -06007465 h->nr_cmds = h->max_commands;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007466 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007467 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
Webb Scalesc7ee65b2015-01-23 16:42:17 -06007468 if (hpsa_supports_chained_sg_blocks(h)) {
 7469 /* Limit in-command s/g elements to 32 to save dma'able memory. */
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007470 h->max_cmd_sg_entries = 32;
Webb Scales1a63ea62014-11-14 17:26:43 -06007471 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007472 h->maxsgentries--; /* save one for chain pointer */
7473 } else {
Webb Scalesc7ee65b2015-01-23 16:42:17 -06007474 /*
7475 * Original smart arrays supported at most 31 s/g entries
7476 * embedded inline in the command (trying to use more
7477 * would lock up the controller)
7478 */
7479 h->max_cmd_sg_entries = 31;
Webb Scales1a63ea62014-11-14 17:26:43 -06007480 h->maxsgentries = 31; /* default to traditional values */
Webb Scalesc7ee65b2015-01-23 16:42:17 -06007481 h->chainsize = 0;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007482 }
Stephen M. Cameron75167d22012-05-01 11:42:51 -05007483
7484 /* Find out what task management functions are supported and cache */
7485 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
Scott Teel0e7a7fc2014-02-18 13:55:59 -06007486 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7487 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7488 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7489 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
Stephen Cameron8be986c2015-04-23 09:34:06 -05007490 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7491 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007492}
7493
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05007494static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7495{
Akinobu Mita0fc9fd42012-04-04 22:14:59 +09007496 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
Stephen Cameron050f7142015-01-23 16:42:22 -06007497 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05007498 return false;
7499 }
7500 return true;
7501}
7502
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007503static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007504{
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007505 u32 driver_support;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007506
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007507 driver_support = readl(&(h->cfgtable->driver_support));
Arnd Bergmann0b9e7b72014-06-26 15:44:52 +02007508 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7509#ifdef CONFIG_X86
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007510 driver_support |= ENABLE_SCSI_PREFETCH;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007511#endif
Stephen M. Cameron28e13442013-12-04 17:10:21 -06007512 driver_support |= ENABLE_UNIT_ATTN;
7513 writel(driver_support, &(h->cfgtable->driver_support));
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007514}
7515
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05007516/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7517 * in a prefetch beyond physical memory.
7518 */
7519static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7520{
7521 u32 dma_prefetch;
7522
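	/* 0x3225103C is the P600's board ID; other boards are unaffected. */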
7523 if (h->board_id != 0x3225103C)
7524 return;
7525 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7526 dma_prefetch |= 0x8000;
7527 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7528}
7529
Robert Elliottc706a792015-01-23 16:45:01 -06007530static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007531{
7532 int i;
7533 u32 doorbell_value;
7534 unsigned long flags;
7535 /* wait until the clear_event_notify bit 6 is cleared by controller. */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007536 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007537 spin_lock_irqsave(&h->lock, flags);
7538 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7539 spin_unlock_irqrestore(&h->lock, flags);
7540 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
Robert Elliottc706a792015-01-23 16:45:01 -06007541 goto done;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007542 /* delay and try again */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007543 msleep(CLEAR_EVENT_WAIT_INTERVAL);
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007544 }
Robert Elliottc706a792015-01-23 16:45:01 -06007545 return -ENODEV;
7546done:
7547 return 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007548}
7549
Robert Elliottc706a792015-01-23 16:45:01 -06007550static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007551{
7552 int i;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007553 u32 doorbell_value;
7554 unsigned long flags;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007555
 7556 /* Under certain very rare conditions, this can take a while
 7557 * (e.g. hot-replacing a failed 144GB drive in a RAID 5 set right
 7558 * as we enter this code).
7559 */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007560 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
Webb Scales25163bd2015-04-23 09:32:00 -05007561 if (h->remove_in_progress)
7562 goto done;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007563 spin_lock_irqsave(&h->lock, flags);
7564 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7565 spin_unlock_irqrestore(&h->lock, flags);
Dan Carpenter382be662011-02-15 15:33:13 -06007566 if (!(doorbell_value & CFGTBL_ChangeReq))
Robert Elliottc706a792015-01-23 16:45:01 -06007567 goto done;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007568 /* delay and try again */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007569 msleep(MODE_CHANGE_WAIT_INTERVAL);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007570 }
Robert Elliottc706a792015-01-23 16:45:01 -06007571 return -ENODEV;
7572done:
7573 return 0;
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007574}
7575
Robert Elliottc706a792015-01-23 16:45:01 -06007576/* return 0 on success, -ENODEV or another negative errno on error */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007577static int hpsa_enter_simple_mode(struct ctlr_info *h)
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007578{
7579 u32 trans_support;
7580
7581 trans_support = readl(&(h->cfgtable->TransportSupport));
7582 if (!(trans_support & SIMPLE_MODE))
7583 return -ENOTSUPP;
7584
7585 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007586
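	/*
	 * Transport-mode changes are a handshake: request simple mode in
	 * the config table, ring the doorbell, wait for the controller to
	 * ack the change, then check that TransportActive really flipped.
	 */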
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007587 /* Update the field, and then ring the doorbell */
7588 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007589 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007590 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
Robert Elliottc706a792015-01-23 16:45:01 -06007591 if (hpsa_wait_for_mode_change_ack(h))
7592 goto error;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007593 print_cfg_table(&h->pdev->dev, h->cfgtable);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007594 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7595 goto error;
Stephen M. Cameron960a30e72011-02-15 15:33:03 -06007596 h->transMethod = CFGTBL_Trans_Simple;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007597 return 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007598error:
Stephen Cameron050f7142015-01-23 16:42:22 -06007599 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007600 return -ENODEV;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007601}
7602
Robert Elliott195f2c62015-04-23 09:33:17 -05007603/* free items allocated or mapped by hpsa_pci_init */
7604static void hpsa_free_pci_init(struct ctlr_info *h)
7605{
7606 hpsa_free_cfgtables(h); /* pci_init 4 */
7607 iounmap(h->vaddr); /* pci_init 3 */
Robert Elliott105a3db2015-04-23 09:33:48 -05007608 h->vaddr = NULL;
Robert Elliott195f2c62015-04-23 09:33:17 -05007609 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
Robert Elliott943a7022015-04-23 09:34:32 -05007610 /*
7611 * call pci_disable_device before pci_release_regions per
7612 * Documentation/PCI/pci.txt
7613 */
Robert Elliott195f2c62015-04-23 09:33:17 -05007614 pci_disable_device(h->pdev); /* pci_init 1 */
Robert Elliott943a7022015-04-23 09:34:32 -05007615 pci_release_regions(h->pdev); /* pci_init 2 */
Robert Elliott195f2c62015-04-23 09:33:17 -05007616}
7617
7618/* several items must be freed later */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007619static int hpsa_pci_init(struct ctlr_info *h)
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007620{
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007621 int prod_index, err;
Hannes Reinecke135ae6e2017-08-15 08:58:04 +02007622 bool legacy_board;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007623
Hannes Reinecke135ae6e2017-08-15 08:58:04 +02007624 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007625 if (prod_index < 0)
Robert Elliott60f923b2015-01-23 16:42:06 -06007626 return prod_index;
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007627 h->product_name = products[prod_index].product_name;
7628 h->access = *(products[prod_index].access);
Hannes Reinecke135ae6e2017-08-15 08:58:04 +02007629 h->legacy_board = legacy_board;
Matthew Garrette5a44df2011-11-11 11:14:23 -05007630 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7631 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7632
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007633 err = pci_enable_device(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007634 if (err) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007635 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
Robert Elliott943a7022015-04-23 09:34:32 -05007636 pci_disable_device(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007637 return err;
7638 }
7639
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06007640 err = pci_request_regions(h->pdev, HPSA);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007641 if (err) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007642 dev_err(&h->pdev->dev,
Robert Elliott195f2c62015-04-23 09:33:17 -05007643 "failed to obtain PCI resources\n");
Robert Elliott943a7022015-04-23 09:34:32 -05007644 pci_disable_device(h->pdev);
7645 return err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007646 }
Robert Elliott4fa604e2014-11-14 17:27:24 -06007647
7648 pci_set_master(h->pdev);
7649
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007650 err = hpsa_interrupt_mode(h);
7651 if (err)
7652 goto clean1;
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05007653 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05007654 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007655 goto clean2; /* intmode+region, pci */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007656 h->vaddr = remap_pci_mem(h->paddr, 0x250);
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007657 if (!h->vaddr) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007658 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007659 err = -ENOMEM;
Robert Elliott195f2c62015-04-23 09:33:17 -05007660 goto clean2; /* intmode+region, pci */
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007661 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007662 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007663 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007664 goto clean3; /* vaddr, intmode+region, pci */
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007665 err = hpsa_find_cfgtables(h);
7666 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007667 goto clean3; /* vaddr, intmode+region, pci */
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007668 hpsa_find_board_params(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007669
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05007670 if (!hpsa_CISS_signature_present(h)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007671 err = -ENODEV;
Robert Elliott195f2c62015-04-23 09:33:17 -05007672 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007673 }
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007674 hpsa_set_driver_support_bits(h);
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05007675 hpsa_p600_dma_prefetch_quirk(h);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007676 err = hpsa_enter_simple_mode(h);
7677 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007678 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007679 return 0;
7680
Robert Elliott195f2c62015-04-23 09:33:17 -05007681clean4: /* cfgtables, vaddr, intmode+region, pci */
7682 hpsa_free_cfgtables(h);
7683clean3: /* vaddr, intmode+region, pci */
7684 iounmap(h->vaddr);
Robert Elliott105a3db2015-04-23 09:33:48 -05007685 h->vaddr = NULL;
Robert Elliott195f2c62015-04-23 09:33:17 -05007686clean2: /* intmode+region, pci */
7687 hpsa_disable_interrupt_mode(h);
clean1: /* pci */
Robert Elliott943a7022015-04-23 09:34:32 -05007689 /*
7690 * call pci_disable_device before pci_release_regions per
7691 * Documentation/PCI/pci.txt
7692 */
Robert Elliott195f2c62015-04-23 09:33:17 -05007693 pci_disable_device(h->pdev);
Robert Elliott943a7022015-04-23 09:34:32 -05007694 pci_release_regions(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007695 return err;
7696}
7697
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007698static void hpsa_hba_inquiry(struct ctlr_info *h)
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06007699{
7700 int rc;
7701
7702#define HBA_INQUIRY_BYTE_COUNT 64
7703 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7704 if (!h->hba_inquiry_data)
7705 return;
7706 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7707 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7708 if (rc != 0) {
7709 kfree(h->hba_inquiry_data);
7710 h->hba_inquiry_data = NULL;
7711 }
7712}
7713
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007714static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007715{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007716 int rc, i;
Tomas Henzl3b747292015-01-23 16:41:20 -06007717 void __iomem *vaddr;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007718
7719 if (!reset_devices)
7720 return 0;
7721
	/* The kdump kernel is loading; we don't know what state the
	 * PCI interface is in. The dev->enable_cnt is equal to zero,
	 * so we call enable+disable, wait a while, and switch it on.
	 */
7726 rc = pci_enable_device(pdev);
7727 if (rc) {
7728 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7729 return -ENODEV;
7730 }
7731 pci_disable_device(pdev);
7732 msleep(260); /* a randomly chosen number */
7733 rc = pci_enable_device(pdev);
7734 if (rc) {
7735 dev_warn(&pdev->dev, "failed to enable device.\n");
7736 return -ENODEV;
7737 }
Robert Elliott4fa604e2014-11-14 17:27:24 -06007738
Tomas Henzl859c75a2014-09-12 14:44:15 +02007739 pci_set_master(pdev);
Robert Elliott4fa604e2014-11-14 17:27:24 -06007740
Tomas Henzl3b747292015-01-23 16:41:20 -06007741 vaddr = pci_ioremap_bar(pdev, 0);
7742 if (vaddr == NULL) {
7743 rc = -ENOMEM;
7744 goto out_disable;
7745 }
7746 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7747 iounmap(vaddr);
7748
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007749 /* Reset the controller with a PCI power-cycle or via doorbell */
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007750 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007751
	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode". Or, it might be a 640x, which can't reset
	 * due to concerns about shared BBWC between the 6402/6404 pair.
	 */
Robert Elliottadf1b3a2015-01-23 16:42:01 -06007757 if (rc)
Tomas Henzl132aa222014-08-14 16:12:39 +02007758 goto out_disable;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007759
7760 /* Now try to get the controller to respond to a no-op */
Robert Elliott1ba66c92015-01-23 16:42:11 -06007761 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007762 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7763 if (hpsa_noop(pdev) == 0)
7764 break;
7765 else
			dev_warn(&pdev->dev, "no-op failed%s\n",
				(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ?
					"; re-trying" : ""));
7768 }
Tomas Henzl132aa222014-08-14 16:12:39 +02007769
7770out_disable:
7771
7772 pci_disable_device(pdev);
7773 return rc;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007774}
7775
Robert Elliott1fb7c982015-04-23 09:33:22 -05007776static void hpsa_free_cmd_pool(struct ctlr_info *h)
7777{
7778 kfree(h->cmd_pool_bits);
Robert Elliott105a3db2015-04-23 09:33:48 -05007779 h->cmd_pool_bits = NULL;
7780 if (h->cmd_pool) {
Robert Elliott1fb7c982015-04-23 09:33:22 -05007781 pci_free_consistent(h->pdev,
7782 h->nr_cmds * sizeof(struct CommandList),
7783 h->cmd_pool,
7784 h->cmd_pool_dhandle);
Robert Elliott105a3db2015-04-23 09:33:48 -05007785 h->cmd_pool = NULL;
7786 h->cmd_pool_dhandle = 0;
7787 }
7788 if (h->errinfo_pool) {
Robert Elliott1fb7c982015-04-23 09:33:22 -05007789 pci_free_consistent(h->pdev,
7790 h->nr_cmds * sizeof(struct ErrorInfo),
7791 h->errinfo_pool,
7792 h->errinfo_pool_dhandle);
Robert Elliott105a3db2015-04-23 09:33:48 -05007793 h->errinfo_pool = NULL;
7794 h->errinfo_pool_dhandle = 0;
7795 }
Robert Elliott1fb7c982015-04-23 09:33:22 -05007796}
7797
Robert Elliottd37ffbe2015-04-23 09:32:27 -05007798static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007799{
7800 h->cmd_pool_bits = kzalloc(
7801 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7802 sizeof(unsigned long), GFP_KERNEL);
7803 h->cmd_pool = pci_alloc_consistent(h->pdev,
7804 h->nr_cmds * sizeof(*h->cmd_pool),
7805 &(h->cmd_pool_dhandle));
7806 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7807 h->nr_cmds * sizeof(*h->errinfo_pool),
7808 &(h->errinfo_pool_dhandle));
7809 if ((h->cmd_pool_bits == NULL)
7810 || (h->cmd_pool == NULL)
7811 || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
Robert Elliott2c143342015-01-23 16:42:48 -06007813 goto clean_up;
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007814 }
Stephen Cameron360c73b2015-04-23 09:32:32 -05007815 hpsa_preinitialize_commands(h);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007816 return 0;
Robert Elliott2c143342015-01-23 16:42:48 -06007817clean_up:
7818 hpsa_free_cmd_pool(h);
7819 return -ENOMEM;
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007820}
7821
/* free MSI-X, MSI, or legacy INTx vectors */
7823static void hpsa_free_irqs(struct ctlr_info *h)
7824{
7825 int i;
7826
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007827 if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
Robert Elliottec501a12015-01-23 16:41:40 -06007828 /* Single reply queue, only one irq to free */
Colin Ian King7dc62d92016-11-14 12:59:35 +00007829 free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007830 h->q[h->intr_mode] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06007831 return;
7832 }
7833
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007834 for (i = 0; i < h->msix_vectors; i++) {
7835 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
Robert Elliott105a3db2015-04-23 09:33:48 -05007836 h->q[i] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06007837 }
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007838 for (; i < MAX_REPLY_QUEUES; i++)
7839 h->q[i] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06007840}
7841
Robert Elliott9ee61792015-01-23 16:42:32 -06007842/* returns 0 on success; cleans up and returns -Enn on error */
7843static int hpsa_request_irqs(struct ctlr_info *h,
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007844 irqreturn_t (*msixhandler)(int, void *),
7845 irqreturn_t (*intxhandler)(int, void *))
7846{
Matt Gates254f7962012-05-01 11:43:06 -05007847 int rc, i;
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007848
Matt Gates254f7962012-05-01 11:43:06 -05007849 /*
7850 * initialize h->q[x] = x so that interrupt handlers know which
7851 * queue to process.
7852 */
7853 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7854 h->q[i] = (u8) i;
7855
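	/*
	 * A minimal sketch of how a handler recovers its reply queue
	 * from dev_id (illustration only; the driver's real handlers
	 * also derive the ctlr_info pointer from this same address):
	 *
	 *	static irqreturn_t example_handler(int irq, void *dev_id)
	 *	{
	 *		u8 queue = *(u8 *)dev_id;
	 *
	 *		...process reply queue 'queue', then...
	 *		return IRQ_HANDLED;
	 *	}
	 */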
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007856 if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
Matt Gates254f7962012-05-01 11:43:06 -05007857 /* If performant mode and MSI-X, use multiple reply queues */
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007858 for (i = 0; i < h->msix_vectors; i++) {
Robert Elliott8b470042015-04-23 09:34:58 -05007859 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007860 rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
Robert Elliott8b470042015-04-23 09:34:58 -05007861 0, h->intrname[i],
Matt Gates254f7962012-05-01 11:43:06 -05007862 &h->q[i]);
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007863 if (rc) {
7864 int j;
7865
7866 dev_err(&h->pdev->dev,
7867 "failed to get irq %d for %s\n",
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007868 pci_irq_vector(h->pdev, i), h->devname);
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007869 for (j = 0; j < i; j++) {
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007870 free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007871 h->q[j] = 0;
7872 }
7873 for (; j < MAX_REPLY_QUEUES; j++)
7874 h->q[j] = 0;
7875 return rc;
7876 }
7877 }
Matt Gates254f7962012-05-01 11:43:06 -05007878 } else {
7879 /* Use single reply pool */
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007880 if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
7881 sprintf(h->intrname[0], "%s-msi%s", h->devname,
7882 h->msix_vectors ? "x" : "");
7883 rc = request_irq(pci_irq_vector(h->pdev, 0),
Robert Elliott8b470042015-04-23 09:34:58 -05007884 msixhandler, 0,
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007885 h->intrname[0],
Matt Gates254f7962012-05-01 11:43:06 -05007886 &h->q[h->intr_mode]);
7887 } else {
			sprintf(h->intrname[0],
				"%s-intx", h->devname);
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007890 rc = request_irq(pci_irq_vector(h->pdev, 0),
Robert Elliott8b470042015-04-23 09:34:58 -05007891 intxhandler, IRQF_SHARED,
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007892 h->intrname[0],
Matt Gates254f7962012-05-01 11:43:06 -05007893 &h->q[h->intr_mode]);
7894 }
7895 }
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007896 if (rc) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007897 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
Christoph Hellwigbc2bb152016-11-09 10:42:22 -08007898 pci_irq_vector(h->pdev, 0), h->devname);
Robert Elliott195f2c62015-04-23 09:33:17 -05007899 hpsa_free_irqs(h);
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007900 return -ENODEV;
7901 }
7902 return 0;
7903}
7904
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007905static int hpsa_kdump_soft_reset(struct ctlr_info *h)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007906{
Robert Elliott39c53f52015-04-23 09:35:14 -05007907 int rc;
Robert Elliottbf43caf2015-04-23 09:33:38 -05007908 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007909
7910 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
Robert Elliott39c53f52015-04-23 09:35:14 -05007911 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7912 if (rc) {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007913 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
Robert Elliott39c53f52015-04-23 09:35:14 -05007914 return rc;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007915 }
7916
7917 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
Robert Elliott39c53f52015-04-23 09:35:14 -05007918 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7919 if (rc) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
Robert Elliott39c53f52015-04-23 09:35:14 -05007922 return rc;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007923 }
7924
7925 return 0;
7926}
7927
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007928static void hpsa_free_reply_queues(struct ctlr_info *h)
7929{
7930 int i;
7931
7932 for (i = 0; i < h->nreply_queues; i++) {
7933 if (!h->reply_queue[i].head)
7934 continue;
Robert Elliott1fb7c982015-04-23 09:33:22 -05007935 pci_free_consistent(h->pdev,
7936 h->reply_queue_size,
7937 h->reply_queue[i].head,
7938 h->reply_queue[i].busaddr);
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007939 h->reply_queue[i].head = NULL;
7940 h->reply_queue[i].busaddr = 0;
7941 }
Robert Elliott105a3db2015-04-23 09:33:48 -05007942 h->reply_queue_size = 0;
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007943}
7944
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05007945static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7946{
Robert Elliott105a3db2015-04-23 09:33:48 -05007947 hpsa_free_performant_mode(h); /* init_one 7 */
7948 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
7949 hpsa_free_cmd_pool(h); /* init_one 5 */
7950 hpsa_free_irqs(h); /* init_one 4 */
Robert Elliott2946e822015-04-23 09:35:09 -05007951 scsi_host_put(h->scsi_host); /* init_one 3 */
7952 h->scsi_host = NULL; /* init_one 3 */
7953 hpsa_free_pci_init(h); /* init_one 2_5 */
Robert Elliott9ecd9532015-04-23 09:34:43 -05007954 free_percpu(h->lockup_detected); /* init_one 2 */
7955 h->lockup_detected = NULL; /* init_one 2 */
7956 if (h->resubmit_wq) {
7957 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
7958 h->resubmit_wq = NULL;
7959 }
7960 if (h->rescan_ctlr_wq) {
7961 destroy_workqueue(h->rescan_ctlr_wq);
7962 h->rescan_ctlr_wq = NULL;
7963 }
Robert Elliott105a3db2015-04-23 09:33:48 -05007964 kfree(h); /* init_one 1 */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007965}
7966
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007967/* Called when controller lockup detected. */
Don Bracef2405db2015-01-23 16:43:09 -06007968static void fail_all_outstanding_cmds(struct ctlr_info *h)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007969{
Webb Scales281a7fd2015-01-23 16:43:35 -06007970 int i, refcount;
7971 struct CommandList *c;
Webb Scales25163bd2015-04-23 09:32:00 -05007972 int failcount = 0;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007973
Don Brace080ef1c2015-01-23 16:43:25 -06007974 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
Don Bracef2405db2015-01-23 16:43:09 -06007975 for (i = 0; i < h->nr_cmds; i++) {
Don Bracef2405db2015-01-23 16:43:09 -06007976 c = h->cmd_pool + i;
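		/*
		 * Take a temporary reference: a resulting count above 1
		 * means another path still holds this command, i.e. it
		 * is outstanding and must be failed with CMD_CTLR_LOCKUP.
		 */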
Webb Scales281a7fd2015-01-23 16:43:35 -06007977 refcount = atomic_inc_return(&c->refcount);
7978 if (refcount > 1) {
Webb Scales25163bd2015-04-23 09:32:00 -05007979 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
Webb Scales281a7fd2015-01-23 16:43:35 -06007980 finish_cmd(c);
Stephen Cameron433b5f42015-04-23 09:32:11 -05007981 atomic_dec(&h->commands_outstanding);
Webb Scales25163bd2015-04-23 09:32:00 -05007982 failcount++;
Webb Scales281a7fd2015-01-23 16:43:35 -06007983 }
7984 cmd_free(h, c);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007985 }
Webb Scales25163bd2015-04-23 09:32:00 -05007986 dev_warn(&h->pdev->dev,
7987 "failed %d commands in fail_all\n", failcount);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007988}
7989
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007990static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7991{
Rusty Russellc8ed0012015-03-05 10:49:19 +10307992 int cpu;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007993
Rusty Russellc8ed0012015-03-05 10:49:19 +10307994 for_each_online_cpu(cpu) {
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007995 u32 *lockup_detected;
7996 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7997 *lockup_detected = value;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007998 }
7999 wmb(); /* be sure the per-cpu variables are out to memory */
8000}
8001
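/*
 * The matching reader (lockup_detected(), defined earlier in this
 * driver) does a per-CPU load, roughly sketched as:
 *
 *	cpu = get_cpu();
 *	rc = *per_cpu_ptr(h->lockup_detected, cpu);
 *	put_cpu();
 *
 * Keeping the flag per-CPU lets hot I/O paths poll it without bouncing
 * a shared cache line between CPUs.
 */
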
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008002static void controller_lockup_detected(struct ctlr_info *h)
8003{
8004 unsigned long flags;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05008005 u32 lockup_detected;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008006
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008007 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8008 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05008009 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8010 if (!lockup_detected) {
8011 /* no heartbeat, but controller gave us a zero. */
8012 dev_warn(&h->pdev->dev,
			"lockup detected after %d seconds but scratchpad register is zero\n",
8014 h->heartbeat_sample_interval / HZ);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05008015 lockup_detected = 0xffffffff;
8016 }
8017 set_lockup_detected_for_all_cpus(h, lockup_detected);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008018 spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8020 lockup_detected, h->heartbeat_sample_interval / HZ);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008021 pci_disable_device(h->pdev);
Don Bracef2405db2015-01-23 16:43:09 -06008022 fail_all_outstanding_cmds(h);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008023}
8024
Webb Scales25163bd2015-04-23 09:32:00 -05008025static int detect_controller_lockup(struct ctlr_info *h)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008026{
8027 u64 now;
8028 u32 heartbeat;
8029 unsigned long flags;
8030
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008031 now = get_jiffies_64();
8032 /* If we've received an interrupt recently, we're ok. */
8033 if (time_after64(h->last_intr_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05008034 (h->heartbeat_sample_interval), now))
Webb Scales25163bd2015-04-23 09:32:00 -05008035 return false;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008036
8037 /*
8038 * If we've already checked the heartbeat recently, we're ok.
8039 * This could happen if someone sends us a signal. We
8040 * otherwise don't care about signals in this thread.
8041 */
8042 if (time_after64(h->last_heartbeat_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05008043 (h->heartbeat_sample_interval), now))
Webb Scales25163bd2015-04-23 09:32:00 -05008044 return false;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008045
8046 /* If heartbeat has not changed since we last looked, we're not ok. */
8047 spin_lock_irqsave(&h->lock, flags);
8048 heartbeat = readl(&h->cfgtable->HeartBeat);
8049 spin_unlock_irqrestore(&h->lock, flags);
8050 if (h->last_heartbeat == heartbeat) {
8051 controller_lockup_detected(h);
Webb Scales25163bd2015-04-23 09:32:00 -05008052 return true;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008053 }
8054
8055 /* We're ok. */
8056 h->last_heartbeat = heartbeat;
8057 h->last_heartbeat_timestamp = now;
Webb Scales25163bd2015-04-23 09:32:00 -05008058 return false;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008059}
8060
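/*
 * detect_controller_lockup() is driven from hpsa_monitor_ctlr_worker()
 * below, once per heartbeat_sample_interval; a sampling interval during
 * which the cfgtable HeartBeat counter does not advance is treated as a
 * firmware lockup.
 */
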
Stephen M. Cameron98465902014-02-21 16:25:00 -06008061static void hpsa_ack_ctlr_events(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008062{
8063 int i;
8064 char *event_type;
8065
Stephen Camerone4aa3e62015-01-23 16:44:07 -06008066 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8067 return;
8068
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008069 /* Ask the controller to clear the events we're handling. */
Stephen M. Cameron1f7cee82014-02-18 13:56:09 -06008070 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8071 | CFGTBL_Trans_io_accel2)) &&
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008072 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8073 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8074
8075 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8076 event_type = "state change";
8077 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8078 event_type = "configuration change";
8079 /* Stop sending new RAID offload reqs via the IO accelerator */
8080 scsi_block_requests(h->scsi_host);
Don Brace5323ed72016-04-27 17:13:59 -05008081 for (i = 0; i < h->ndevices; i++) {
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008082 h->dev[i]->offload_enabled = 0;
Don Brace5323ed72016-04-27 17:13:59 -05008083 h->dev[i]->offload_to_be_enabled = 0;
8084 }
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06008085 hpsa_drain_accel_commands(h);
		/* Tell the controller which events we are acknowledging */
8087 dev_warn(&h->pdev->dev,
8088 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8089 h->events, event_type);
8090 writel(h->events, &(h->cfgtable->clear_event_notify));
8091 /* Set the "clear event notify field update" bit 6 */
8092 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8093 /* Wait until ctlr clears 'clear event notify field', bit 6 */
8094 hpsa_wait_for_clear_event_notify_ack(h);
8095 scsi_unblock_requests(h->scsi_host);
8096 } else {
8097 /* Acknowledge controller notification events. */
8098 writel(h->events, &(h->cfgtable->clear_event_notify));
8099 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8100 hpsa_wait_for_clear_event_notify_ack(h);
8101#if 0
8102 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8103 hpsa_wait_for_mode_change_ack(h);
8104#endif
8105 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06008106 return;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008107}
8108
8109/* Check a register on the controller to see if there are configuration
8110 * changes (added/changed/removed logical drives, etc.) which mean that
Scott Teele863d682014-02-18 13:57:05 -06008111 * we should rescan the controller for devices.
8112 * Also check flag for driver-initiated rescan.
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008113 */
Stephen M. Cameron98465902014-02-21 16:25:00 -06008114static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008115{
Don Brace853633e2015-11-04 15:50:37 -06008116 if (h->drv_req_rescan) {
8117 h->drv_req_rescan = 0;
8118 return 1;
8119 }
8120
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008121 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
Stephen M. Cameron98465902014-02-21 16:25:00 -06008122 return 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008123
8124 h->events = readl(&(h->cfgtable->event_notify));
Stephen M. Cameron98465902014-02-21 16:25:00 -06008125 return h->events & RESCAN_REQUIRED_EVENT_BITS;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06008126}
8127
Stephen M. Cameron98465902014-02-21 16:25:00 -06008128/*
8129 * Check if any of the offline devices have become ready
8130 */
8131static int hpsa_offline_devices_ready(struct ctlr_info *h)
8132{
8133 unsigned long flags;
8134 struct offline_device_entry *d;
8135 struct list_head *this, *tmp;
8136
8137 spin_lock_irqsave(&h->offline_device_lock, flags);
8138 list_for_each_safe(this, tmp, &h->offline_device_list) {
8139 d = list_entry(this, struct offline_device_entry,
8140 offline_list);
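		/*
		 * Drop the spinlock while probing the device:
		 * hpsa_volume_offline() issues commands to the controller
		 * and may sleep, which is not allowed under a spinlock.
		 */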
8141 spin_unlock_irqrestore(&h->offline_device_lock, flags);
Stephen M. Camerond1fea472014-07-03 10:17:58 -05008142 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8143 spin_lock_irqsave(&h->offline_device_lock, flags);
8144 list_del(&d->offline_list);
8145 spin_unlock_irqrestore(&h->offline_device_lock, flags);
Stephen M. Cameron98465902014-02-21 16:25:00 -06008146 return 1;
Stephen M. Camerond1fea472014-07-03 10:17:58 -05008147 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06008148 spin_lock_irqsave(&h->offline_device_lock, flags);
8149 }
8150 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8151 return 0;
8152}
8153
Scott Teel34592252015-11-04 15:52:09 -06008154static int hpsa_luns_changed(struct ctlr_info *h)
8155{
8156 int rc = 1; /* assume there are changes */
8157 struct ReportLUNdata *logdev = NULL;
8158
8159 /* if we can't find out if lun data has changed,
8160 * assume that it has.
8161 */
8162
8163 if (!h->lastlogicals)
Amit Kushwaha7e8a9482016-12-12 16:34:21 +05308164 return rc;
Scott Teel34592252015-11-04 15:52:09 -06008165
8166 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
Amit Kushwaha7e8a9482016-12-12 16:34:21 +05308167 if (!logdev)
8168 return rc;
8169
Scott Teel34592252015-11-04 15:52:09 -06008170 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8171 dev_warn(&h->pdev->dev,
8172 "report luns failed, can't track lun changes.\n");
8173 goto out;
8174 }
8175 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8176 dev_info(&h->pdev->dev,
8177 "Lun changes detected.\n");
8178 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8179 goto out;
8180 } else
8181 rc = 0; /* no changes detected. */
8182out:
8183 kfree(logdev);
8184 return rc;
8185}
8186
Scott Teel3d38f002017-05-04 17:51:36 -05008187static void hpsa_perform_rescan(struct ctlr_info *h)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008188{
Scott Teel3d38f002017-05-04 17:51:36 -05008189 struct Scsi_Host *sh = NULL;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008190 unsigned long flags;
Stephen M. Cameron98465902014-02-21 16:25:00 -06008191
Don Bracebfd75462016-11-15 14:45:32 -06008192 /*
8193 * Do the scan after the reset
8194 */
Don Bracec59d04f2017-05-04 17:51:22 -05008195 spin_lock_irqsave(&h->reset_lock, flags);
Don Bracebfd75462016-11-15 14:45:32 -06008196 if (h->reset_in_progress) {
8197 h->drv_req_rescan = 1;
Don Bracec59d04f2017-05-04 17:51:22 -05008198 spin_unlock_irqrestore(&h->reset_lock, flags);
Don Bracebfd75462016-11-15 14:45:32 -06008199 return;
8200 }
Don Bracec59d04f2017-05-04 17:51:22 -05008201 spin_unlock_irqrestore(&h->reset_lock, flags);
Don Bracebfd75462016-11-15 14:45:32 -06008202
Scott Teel3d38f002017-05-04 17:51:36 -05008203 sh = scsi_host_get(h->scsi_host);
8204 if (sh != NULL) {
8205 hpsa_scan_start(sh);
8206 scsi_host_put(sh);
8207 h->drv_req_rescan = 0;
8208 }
8209}
8210
8211/*
8212 * watch for controller events
8213 */
8214static void hpsa_event_monitor_worker(struct work_struct *work)
8215{
8216 struct ctlr_info *h = container_of(to_delayed_work(work),
8217 struct ctlr_info, event_monitor_work);
8218 unsigned long flags;
8219
8220 spin_lock_irqsave(&h->lock, flags);
8221 if (h->remove_in_progress) {
8222 spin_unlock_irqrestore(&h->lock, flags);
8223 return;
8224 }
8225 spin_unlock_irqrestore(&h->lock, flags);
8226
8227 if (hpsa_ctlr_needs_rescan(h)) {
Stephen M. Cameron98465902014-02-21 16:25:00 -06008228 hpsa_ack_ctlr_events(h);
Scott Teel3d38f002017-05-04 17:51:36 -05008229 hpsa_perform_rescan(h);
8230 }
8231
8232 spin_lock_irqsave(&h->lock, flags);
8233 if (!h->remove_in_progress)
8234 schedule_delayed_work(&h->event_monitor_work,
8235 HPSA_EVENT_MONITOR_INTERVAL);
8236 spin_unlock_irqrestore(&h->lock, flags);
8237}
8238
8239static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8240{
8241 unsigned long flags;
8242 struct ctlr_info *h = container_of(to_delayed_work(work),
8243 struct ctlr_info, rescan_ctlr_work);
8244
8245 spin_lock_irqsave(&h->lock, flags);
8246 if (h->remove_in_progress) {
8247 spin_unlock_irqrestore(&h->lock, flags);
8248 return;
8249 }
8250 spin_unlock_irqrestore(&h->lock, flags);
8251
8252 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8253 hpsa_perform_rescan(h);
Scott Teel34592252015-11-04 15:52:09 -06008254 } else if (h->discovery_polling) {
Scott Teelc2adae42015-11-04 15:52:16 -06008255 hpsa_disable_rld_caching(h);
Scott Teel34592252015-11-04 15:52:09 -06008256 if (hpsa_luns_changed(h)) {
Scott Teel34592252015-11-04 15:52:09 -06008257 dev_info(&h->pdev->dev,
8258 "driver discovery polling rescan.\n");
Scott Teel3d38f002017-05-04 17:51:36 -05008259 hpsa_perform_rescan(h);
Scott Teel34592252015-11-04 15:52:09 -06008260 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06008261 }
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008262 spin_lock_irqsave(&h->lock, flags);
Don Brace6636e7f2015-01-23 16:45:17 -06008263 if (!h->remove_in_progress)
8264 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008265 h->heartbeat_sample_interval);
8266 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008267}
8268
Don Brace6636e7f2015-01-23 16:45:17 -06008269static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8270{
8271 unsigned long flags;
8272 struct ctlr_info *h = container_of(to_delayed_work(work),
8273 struct ctlr_info, monitor_ctlr_work);
8274
8275 detect_controller_lockup(h);
8276 if (lockup_detected(h))
8277 return;
8278
8279 spin_lock_irqsave(&h->lock, flags);
8280 if (!h->remove_in_progress)
8281 schedule_delayed_work(&h->monitor_ctlr_work,
8282 h->heartbeat_sample_interval);
8283 spin_unlock_irqrestore(&h->lock, flags);
8284}
8285
8286static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8287 char *name)
8288{
8289 struct workqueue_struct *wq = NULL;
Don Brace6636e7f2015-01-23 16:45:17 -06008290
Don Brace397ea9c2015-02-06 17:44:15 -06008291 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
Don Brace6636e7f2015-01-23 16:45:17 -06008292 if (!wq)
8293 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8294
8295 return wq;
8296}
8297
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008298static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008299{
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05008300 int dac, rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008301 struct ctlr_info *h;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008302 int try_soft_reset = 0;
8303 unsigned long flags;
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02008304 u32 board_id;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008305
8306 if (number_of_controllers == 0)
8307 printk(KERN_INFO DRIVER_NAME "\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008308
Hannes Reinecke135ae6e2017-08-15 08:58:04 +02008309 rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02008310 if (rc < 0) {
8311 dev_warn(&pdev->dev, "Board ID not found\n");
8312 return rc;
8313 }
8314
8315 rc = hpsa_init_reset_devices(pdev, board_id);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008316 if (rc) {
8317 if (rc != -ENOTSUPP)
8318 return rc;
8319 /* If the reset fails in a particular way (it has no way to do
8320 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8321 * a soft reset once we get the controller configured up to the
8322 * point that it can accept a command.
8323 */
8324 try_soft_reset = 1;
8325 rc = 0;
8326 }
8327
8328reinit_after_soft_reset:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008329
Don Brace303932f2010-02-04 08:42:40 -06008330 /* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
8332 * the driver. See comments in hpsa.h for more info.
8333 */
Don Brace303932f2010-02-04 08:42:40 -06008334 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008335 h = kzalloc(sizeof(*h), GFP_KERNEL);
Robert Elliott105a3db2015-04-23 09:33:48 -05008336 if (!h) {
8337 dev_err(&pdev->dev, "Failed to allocate controller head\n");
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06008338 return -ENOMEM;
Robert Elliott105a3db2015-04-23 09:33:48 -05008339 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008340
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05008341 h->pdev = pdev;
Robert Elliott105a3db2015-04-23 09:33:48 -05008342
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06008343 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
Stephen M. Cameron98465902014-02-21 16:25:00 -06008344 INIT_LIST_HEAD(&h->offline_device_list);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06008345 spin_lock_init(&h->lock);
Stephen M. Cameron98465902014-02-21 16:25:00 -06008346 spin_lock_init(&h->offline_device_lock);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06008347 spin_lock_init(&h->scan_lock);
Don Bracec59d04f2017-05-04 17:51:22 -05008348 spin_lock_init(&h->reset_lock);
Don Brace34f0c622015-01-23 16:43:46 -06008349 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05008350
8351 /* Allocate and clear per-cpu variable lockup_detected */
8352 h->lockup_detected = alloc_percpu(u32);
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05008353 if (!h->lockup_detected) {
Robert Elliott105a3db2015-04-23 09:33:48 -05008354 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05008355 rc = -ENOMEM;
Robert Elliott2efa5922015-04-23 09:34:53 -05008356 goto clean1; /* aer/h */
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05008357 }
Stephen M. Cameron094963d2014-05-29 10:53:18 -05008358 set_lockup_detected_for_all_cpus(h, 0);
8359
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05008360 rc = hpsa_pci_init(h);
Robert Elliott105a3db2015-04-23 09:33:48 -05008361 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05008362 goto clean2; /* lu, aer/h */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008363
Robert Elliott2946e822015-04-23 09:35:09 -05008364 /* relies on h-> settings made by hpsa_pci_init, including
8365 * interrupt_mode h->intr */
8366 rc = hpsa_scsi_host_alloc(h);
8367 if (rc)
8368 goto clean2_5; /* pci, lu, aer/h */
8369
8370 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008371 h->ctlr = number_of_controllers;
8372 number_of_controllers++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008373
8374 /* configure PCI DMA stuff */
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06008375 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8376 if (rc == 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008377 dac = 1;
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06008378 } else {
8379 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8380 if (rc == 0) {
8381 dac = 0;
8382 } else {
8383 dev_err(&pdev->dev, "no suitable DMA available\n");
Robert Elliott2946e822015-04-23 09:35:09 -05008384 goto clean3; /* shost, pci, lu, aer/h */
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06008385 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008386 }
8387
8388 /* make sure the board interrupts are off */
8389 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05008390
Robert Elliott105a3db2015-04-23 09:33:48 -05008391 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8392 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05008393 goto clean3; /* shost, pci, lu, aer/h */
Robert Elliottd37ffbe2015-04-23 09:32:27 -05008394 rc = hpsa_alloc_cmd_pool(h);
Robert Elliott8947fd12015-01-23 16:42:54 -06008395 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05008396 goto clean4; /* irq, shost, pci, lu, aer/h */
Robert Elliott105a3db2015-04-23 09:33:48 -05008397 rc = hpsa_alloc_sg_chain_blocks(h);
8398 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05008399 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
Stephen M. Camerona08a8472010-02-04 08:43:16 -06008400 init_waitqueue_head(&h->scan_wait_queue);
Webb Scalesd604f532015-04-23 09:35:22 -05008401 init_waitqueue_head(&h->event_sync_wait_queue);
8402 mutex_init(&h->reset_mutex);
Stephen M. Camerona08a8472010-02-04 08:43:16 -06008403 h->scan_finished = 1; /* no scan currently in progress */
Don Brace87b9e6a2017-03-10 14:35:17 -06008404 h->scan_waiting = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008405
8406 pci_set_drvdata(pdev, h);
Stephen M. Cameron9a413382011-05-03 14:59:41 -05008407 h->ndevices = 0;
Robert Elliott2946e822015-04-23 09:35:09 -05008408
Stephen M. Cameron9a413382011-05-03 14:59:41 -05008409 spin_lock_init(&h->devlock);
Robert Elliott105a3db2015-04-23 09:33:48 -05008410 rc = hpsa_put_ctlr_into_performant_mode(h);
8411 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05008412 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8413
Robert Elliott2efa5922015-04-23 09:34:53 -05008414 /* create the resubmit workqueue */
8415 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8416 if (!h->rescan_ctlr_wq) {
8417 rc = -ENOMEM;
8418 goto clean7;
8419 }
8420
8421 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8422 if (!h->resubmit_wq) {
8423 rc = -ENOMEM;
8424 goto clean7; /* aer/h */
8425 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008426
Robert Elliott105a3db2015-04-23 09:33:48 -05008427 /*
8428 * At this point, the controller is ready to take commands.
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008429 * Now, if reset_devices and the hard reset didn't work, try
8430 * the soft reset and see if that works.
8431 */
8432 if (try_soft_reset) {
8433
8434 /* This is kind of gross. We may or may not get a completion
8435 * from the soft reset command, and if we do, then the value
8436 * from the fifo may or may not be valid. So, we wait 10 secs
8437 * after the reset throwing away any completions we get during
8438 * that time. Unregister the interrupt handler and register
8439 * fake ones to scoop up any residual completions.
8440 */
8441 spin_lock_irqsave(&h->lock, flags);
8442 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8443 spin_unlock_irqrestore(&h->lock, flags);
Robert Elliottec501a12015-01-23 16:41:40 -06008444 hpsa_free_irqs(h);
Robert Elliott9ee61792015-01-23 16:42:32 -06008445 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008446 hpsa_intx_discard_completions);
8447 if (rc) {
Robert Elliott9ee61792015-01-23 16:42:32 -06008448 dev_warn(&h->pdev->dev,
8449 "Failed to request_irq after soft reset.\n");
Robert Elliottd4987572015-04-23 09:34:37 -05008450 /*
Robert Elliottb2ef4802015-04-23 09:34:48 -05008451 * cannot goto clean7 or free_irqs will be called
8452 * again. Instead, do its work
8453 */
8454 hpsa_free_performant_mode(h); /* clean7 */
8455 hpsa_free_sg_chain_blocks(h); /* clean6 */
8456 hpsa_free_cmd_pool(h); /* clean5 */
8457 /*
8458 * skip hpsa_free_irqs(h) clean4 since that
8459 * was just called before request_irqs failed
Robert Elliottd4987572015-04-23 09:34:37 -05008460 */
8461 goto clean3;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008462 }
8463
8464 rc = hpsa_kdump_soft_reset(h);
8465 if (rc)
8466 /* Neither hard nor soft reset worked, we're hosed. */
Don Brace7ef73232015-07-18 11:12:33 -05008467 goto clean7;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008468
8469 dev_info(&h->pdev->dev, "Board READY.\n");
8470 dev_info(&h->pdev->dev,
8471 "Waiting for stale completions to drain.\n");
8472 h->access.set_intr_mask(h, HPSA_INTR_ON);
8473 msleep(10000);
8474 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8475
8476 rc = controller_reset_failed(h->cfgtable);
8477 if (rc)
8478 dev_info(&h->pdev->dev,
8479 "Soft reset appears to have failed.\n");
8480
8481 /* since the controller's reset, we have to go back and re-init
8482 * everything. Easiest to just forget what we've done and do it
8483 * all over again.
8484 */
8485 hpsa_undo_allocations_after_kdump_soft_reset(h);
8486 try_soft_reset = 0;
8487 if (rc)
Robert Elliottb2ef4802015-04-23 09:34:48 -05008488 /* don't goto clean, we already unallocated */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008489 return -ENODEV;
8490
8491 goto reinit_after_soft_reset;
8492 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008493
Robert Elliott105a3db2015-04-23 09:33:48 -05008494 /* Enable Accelerated IO path at driver layer */
8495 h->acciopath_status = 1;
Scott Teel34592252015-11-04 15:52:09 -06008496 /* Disable discovery polling.*/
8497 h->discovery_polling = 0;
Scott Teelda0697b2014-02-18 13:57:00 -06008498
Scott Teele863d682014-02-18 13:57:05 -06008499
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008500 /* Turn the interrupts on so we can service requests */
8501 h->access.set_intr_mask(h, HPSA_INTR_ON);
8502
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06008503 hpsa_hba_inquiry(h);
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008504
Scott Teel34592252015-11-04 15:52:09 -06008505 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8506 if (!h->lastlogicals)
8507 dev_info(&h->pdev->dev,
8508 "Can't track change to report lun data\n");
8509
Don Bracecf477232016-04-27 17:13:26 -05008510 /* hook into SCSI subsystem */
8511 rc = hpsa_scsi_add_host(h);
8512 if (rc)
8513 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8514
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008515 /* Monitor the controller for firmware lockups */
8516 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8517 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8518 schedule_delayed_work(&h->monitor_ctlr_work,
8519 h->heartbeat_sample_interval);
Don Brace6636e7f2015-01-23 16:45:17 -06008520 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8521 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8522 h->heartbeat_sample_interval);
Scott Teel3d38f002017-05-04 17:51:36 -05008523 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8524 schedule_delayed_work(&h->event_monitor_work,
8525 HPSA_EVENT_MONITOR_INTERVAL);
Stephen M. Cameron88bf6d62013-11-01 11:02:25 -05008526 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008527
Robert Elliott2946e822015-04-23 09:35:09 -05008528clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
Robert Elliott105a3db2015-04-23 09:33:48 -05008529 hpsa_free_performant_mode(h);
8530 h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06008532 hpsa_free_sg_chain_blocks(h);
Robert Elliott2946e822015-04-23 09:35:09 -05008533clean5: /* cmd, irq, shost, pci, lu, aer/h */
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05008534 hpsa_free_cmd_pool(h);
Robert Elliott2946e822015-04-23 09:35:09 -05008535clean4: /* irq, shost, pci, lu, aer/h */
Robert Elliottec501a12015-01-23 16:41:40 -06008536 hpsa_free_irqs(h);
Robert Elliott2946e822015-04-23 09:35:09 -05008537clean3: /* shost, pci, lu, aer/h */
8538 scsi_host_put(h->scsi_host);
8539 h->scsi_host = NULL;
8540clean2_5: /* pci, lu, aer/h */
Robert Elliott195f2c62015-04-23 09:33:17 -05008541 hpsa_free_pci_init(h);
Robert Elliott2946e822015-04-23 09:35:09 -05008542clean2: /* lu, aer/h */
Robert Elliott105a3db2015-04-23 09:33:48 -05008543 if (h->lockup_detected) {
Stephen M. Cameron094963d2014-05-29 10:53:18 -05008544 free_percpu(h->lockup_detected);
Robert Elliott105a3db2015-04-23 09:33:48 -05008545 h->lockup_detected = NULL;
8546 }
8547clean1: /* wq/aer/h */
8548 if (h->resubmit_wq) {
8549 destroy_workqueue(h->resubmit_wq);
8550 h->resubmit_wq = NULL;
8551 }
8552 if (h->rescan_ctlr_wq) {
8553 destroy_workqueue(h->rescan_ctlr_wq);
8554 h->rescan_ctlr_wq = NULL;
8555 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008556 kfree(h);
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06008557 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008558}
8559
8560static void hpsa_flush_cache(struct ctlr_info *h)
8561{
8562 char *flush_buf;
8563 struct CommandList *c;
Webb Scales25163bd2015-04-23 09:32:00 -05008564 int rc;
Stephen M. Cameron702890e2013-09-23 13:33:30 -05008565
Stephen M. Cameron094963d2014-05-29 10:53:18 -05008566 if (unlikely(lockup_detected(h)))
Stephen M. Cameron702890e2013-09-23 13:33:30 -05008567 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008568 flush_buf = kzalloc(4, GFP_KERNEL);
8569 if (!flush_buf)
8570 return;
8571
Stephen Cameron45fcb862015-01-23 16:43:04 -06008572 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05008573
Stephen M. Camerona2dac132013-02-20 11:24:41 -06008574 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8575 RAID_CTLR_LUNID, TYPE_CMD)) {
8576 goto out;
8577 }
Webb Scales25163bd2015-04-23 09:32:00 -05008578 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
Don Bracec448ecf2016-04-27 17:13:51 -05008579 PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
Webb Scales25163bd2015-04-23 09:32:00 -05008580 if (rc)
8581 goto out;
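	/*
	 * Note: the out: label deliberately sits between the if below and
	 * its statement, so the warning fires both when the flush completes
	 * with a nonzero CommandStatus and on any goto out above.
	 */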
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008582 if (c->err_info->CommandStatus != 0)
Stephen M. Camerona2dac132013-02-20 11:24:41 -06008583out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008584 dev_warn(&h->pdev->dev,
8585 "error flushing cache on controller\n");
Stephen Cameron45fcb862015-01-23 16:43:04 -06008586 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008587 kfree(flush_buf);
8588}
8589
Scott Teelc2adae42015-11-04 15:52:16 -06008590/* Make controller gather fresh report lun data each time we
8591 * send down a report luns request
8592 */
8593static void hpsa_disable_rld_caching(struct ctlr_info *h)
8594{
8595 u32 *options;
8596 struct CommandList *c;
8597 int rc;
8598
8599 /* Don't bother trying to set diag options if locked up */
8600 if (unlikely(h->lockup_detected))
8601 return;
8602
8603 options = kzalloc(sizeof(*options), GFP_KERNEL);
Amit Kushwaha7e8a9482016-12-12 16:34:21 +05308604 if (!options)
Scott Teelc2adae42015-11-04 15:52:16 -06008605 return;
Scott Teelc2adae42015-11-04 15:52:16 -06008606
8607 c = cmd_alloc(h);
8608
8609 /* first, get the current diag options settings */
8610 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8611 RAID_CTLR_LUNID, TYPE_CMD))
8612 goto errout;
8613
8614 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
Don Bracec448ecf2016-04-27 17:13:51 -05008615 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
Scott Teelc2adae42015-11-04 15:52:16 -06008616 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8617 goto errout;
8618
8619 /* Now, set the bit for disabling the RLD caching */
8620 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8621
8622 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8623 RAID_CTLR_LUNID, TYPE_CMD))
8624 goto errout;
8625
8626 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
Don Bracec448ecf2016-04-27 17:13:51 -05008627 PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
Scott Teelc2adae42015-11-04 15:52:16 -06008628 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8629 goto errout;
8630
8631 /* Now verify that it got set: */
8632 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8633 RAID_CTLR_LUNID, TYPE_CMD))
8634 goto errout;
8635
8636 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
Don Bracec448ecf2016-04-27 17:13:51 -05008637 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
Scott Teelc2adae42015-11-04 15:52:16 -06008638 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8639 goto errout;
8640
Dan Carpenterd8a080c2015-11-12 12:43:38 +03008641 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
Scott Teelc2adae42015-11-04 15:52:16 -06008642 goto out;
8643
8644errout:
8645 dev_err(&h->pdev->dev,
8646 "Error: failed to disable report lun data caching.\n");
8647out:
8648 cmd_free(h, c);
8649 kfree(options);
8650}
8651
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008652static void hpsa_shutdown(struct pci_dev *pdev)
8653{
8654 struct ctlr_info *h;
8655
8656 h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * The flush writes all data in the battery-backed cache out to
	 * disk before the controller loses power.
	 */
8661 hpsa_flush_cache(h);
8662 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Robert Elliott105a3db2015-04-23 09:33:48 -05008663 hpsa_free_irqs(h); /* init_one 4 */
Robert Elliottcc64c812015-04-23 09:33:12 -05008664 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008665}
8666
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008667static void hpsa_free_device_info(struct ctlr_info *h)
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06008668{
8669 int i;
8670
Robert Elliott105a3db2015-04-23 09:33:48 -05008671 for (i = 0; i < h->ndevices; i++) {
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06008672 kfree(h->dev[i]);
Robert Elliott105a3db2015-04-23 09:33:48 -05008673 h->dev[i] = NULL;
8674 }
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06008675}
8676
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008677static void hpsa_remove_one(struct pci_dev *pdev)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008678{
8679 struct ctlr_info *h;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008680 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008681
8682 if (pci_get_drvdata(pdev) == NULL) {
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008683 dev_err(&pdev->dev, "unable to remove device\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008684 return;
8685 }
8686 h = pci_get_drvdata(pdev);
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008687
8688 /* Get rid of any controller monitoring work items */
8689 spin_lock_irqsave(&h->lock, flags);
8690 h->remove_in_progress = 1;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008691 spin_unlock_irqrestore(&h->lock, flags);
Don Brace6636e7f2015-01-23 16:45:17 -06008692 cancel_delayed_work_sync(&h->monitor_ctlr_work);
8693 cancel_delayed_work_sync(&h->rescan_ctlr_work);
Scott Teel3d38f002017-05-04 17:51:36 -05008694 cancel_delayed_work_sync(&h->event_monitor_work);
Don Brace6636e7f2015-01-23 16:45:17 -06008695 destroy_workqueue(h->rescan_ctlr_wq);
8696 destroy_workqueue(h->resubmit_wq);
Robert Elliottcc64c812015-04-23 09:33:12 -05008697
Don Brace2d041302015-07-18 11:13:15 -05008698 /*
8699 * Call before disabling interrupts.
8700 * scsi_remove_host can trigger I/O operations especially
8701 * when multipath is enabled. There can be SYNCHRONIZE CACHE
8702 * operations which cannot complete and will hang the system.
8703 */
8704 if (h->scsi_host)
8705 scsi_remove_host(h->scsi_host); /* init_one 8 */
Robert Elliott105a3db2015-04-23 09:33:48 -05008706 /* includes hpsa_free_irqs - init_one 4 */
Robert Elliott195f2c62015-04-23 09:33:17 -05008707 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008708 hpsa_shutdown(pdev);
Robert Elliottcc64c812015-04-23 09:33:12 -05008709
Robert Elliott105a3db2015-04-23 09:33:48 -05008710 hpsa_free_device_info(h); /* scan */
8711
Robert Elliott2946e822015-04-23 09:35:09 -05008712 kfree(h->hba_inquiry_data); /* init_one 10 */
8713 h->hba_inquiry_data = NULL; /* init_one 10 */
Robert Elliott2946e822015-04-23 09:35:09 -05008714 hpsa_free_ioaccel2_sg_chain_blocks(h);
Robert Elliott105a3db2015-04-23 09:33:48 -05008715 hpsa_free_performant_mode(h); /* init_one 7 */
8716 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8717 hpsa_free_cmd_pool(h); /* init_one 5 */
Scott Teel34592252015-11-04 15:52:09 -06008718 kfree(h->lastlogicals);
Robert Elliott105a3db2015-04-23 09:33:48 -05008719
8720 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
Robert Elliott195f2c62015-04-23 09:33:17 -05008721
Robert Elliott2946e822015-04-23 09:35:09 -05008722 scsi_host_put(h->scsi_host); /* init_one 3 */
8723 h->scsi_host = NULL; /* init_one 3 */
8724
Robert Elliott195f2c62015-04-23 09:33:17 -05008725 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
Robert Elliott2946e822015-04-23 09:35:09 -05008726 hpsa_free_pci_init(h); /* init_one 2.5 */
Robert Elliott195f2c62015-04-23 09:33:17 -05008727
Robert Elliott105a3db2015-04-23 09:33:48 -05008728 free_percpu(h->lockup_detected); /* init_one 2 */
8729 h->lockup_detected = NULL; /* init_one 2 */
8730 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
Kevin Barnettd04e62b2015-11-04 15:52:34 -06008731
8732 hpsa_delete_sas_host(h);
8733
Robert Elliott105a3db2015-04-23 09:33:48 -05008734 kfree(h); /* init_one 1 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008735}
8736
8737static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8738 __attribute__((unused)) pm_message_t state)
8739{
8740 return -ENOSYS;
8741}
8742
8743static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8744{
8745 return -ENOSYS;
8746}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
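
/*
 * Worked example, using the bft[] values programmed below: with
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and
 * min_blocks = 4, a command with i = 3 SG entries needs 3 + 4 = 7
 * sixteen-byte blocks; the first bucket >= 7 is bucket[2] = 8, so
 * bucket_map[3] = 2 and the controller fetches 8 * 16 = 128 bytes.
 */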

/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to, to tell it the 8 different
	 * sizes of commands there may be.  It's a way of reducing the
	 * DMA done to fetch each command.  Encoded into each command's
	 * tag are 3 bits which communicate to the controller which of
	 * the eight sizes that command fits within.  The size of each
	 * command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are
	 * programmed with the number of 16-byte blocks a command of
	 * that size requires.  The smallest command possible requires
	 * 5 such 16-byte blocks.  The largest command possible requires
	 * SG_ENTRIES_IN_CMD + 4 16-byte blocks.  Note, this only
	 * extends to the SG entries contained within the command block,
	 * and does not extend to chained blocks of SG elements.  bft[]
	 * contains the eight values we write to the registers.  They
	 * are not evenly distributed, but have more sizes for small
	 * commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
				 16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
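	/*
	 * Extending that arithmetic (assuming, as the examples above
	 * do, one ~4k page per SG element):
	 * 12 = 8 s/g entries or 32k
	 * 20 = 16 s/g entries or 64k
	 * 28 = 24 s/g entries or 96k
	 */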

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/* enable outbound interrupt coalescing in accelerator mode */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else if (trans_support & CFGTBL_Trans_io_accel2)
		access = SA5_ioaccel_mode2_access;
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}

/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}

/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}
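
/*
 * Note on the alignment check above: per the DMA API, the pool from
 * pci_alloc_consistent() is at least page aligned, so as long as
 * sizeof(struct io_accel1_cmd) is a multiple of the 128-byte boundary
 * (which the BUILD_BUG_ON enforces), every element of the array stays
 * aligned and the hardware may freely reuse the 7 low address bits.
 */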

/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}

/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}

/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
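
/*
 * The clean1/clean2 labels above follow the driver's usual unwind
 * convention: each label releases everything allocated at or after
 * the step it names (the trailing comments list what is still live),
 * so any failure point can jump to a single label and free the rest.
 */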

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
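
/*
 * The drain loop above probes each command's allocation state through
 * its refcount: atomic_inc_return() > 1 means some other path still
 * holds the command, and the matching cmd_free() drops the probe's
 * reference again.  Polling at 100ms intervals is cheap since one
 * scan touches only h->nr_cmds pool entries.
 */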
9191
Kevin Barnettd04e62b2015-11-04 15:52:34 -06009192static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9193 struct hpsa_sas_port *hpsa_sas_port)
9194{
9195 struct hpsa_sas_phy *hpsa_sas_phy;
9196 struct sas_phy *phy;
9197
9198 hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9199 if (!hpsa_sas_phy)
9200 return NULL;
9201
9202 phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9203 hpsa_sas_port->next_phy_index);
9204 if (!phy) {
9205 kfree(hpsa_sas_phy);
9206 return NULL;
9207 }
9208
9209 hpsa_sas_port->next_phy_index++;
9210 hpsa_sas_phy->phy = phy;
9211 hpsa_sas_phy->parent_port = hpsa_sas_port;
9212
9213 return hpsa_sas_phy;
9214}
9215
9216static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9217{
9218 struct sas_phy *phy = hpsa_sas_phy->phy;
9219
9220 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9221 sas_phy_free(phy);
9222 if (hpsa_sas_phy->added_to_port)
9223 list_del(&hpsa_sas_phy->phy_list_entry);
9224 kfree(hpsa_sas_phy);
9225}

static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}

static int
	hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}

static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}

static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}

static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}

static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}

static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}

static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_gendev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	hpsa_free_sas_phy(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}

static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}

static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_port;

	return 0;

free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}

static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}

static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	*identifier = 0;
	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

/* SMP = Serial Management Protocol */
static int
hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
			struct request *req)
{
	return -EINVAL;
}

static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
	.smp_handler = hpsa_sas_smp_handler,
};
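
/*
 * Nearly all of the transport callbacks above are stubs (success or
 * -EINVAL/-ENXIO): the Smart Array firmware owns the physical SAS
 * topology, so there are no meaningful link error counters, bay
 * identifiers or SMP passthrough to report.  The template still has
 * to exist so devices can be registered with the SAS transport class.
 */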

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds
 * one of our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}
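
/*
 * Ordering note: the SAS transport template must be attached before
 * pci_register_driver(), since probing (hpsa_init_one) may run from
 * inside that call and already depends on the template; if driver
 * registration fails, the template is released again above.
 */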

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3); */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4); */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}
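
/*
 * verify_offsets() is never called; it exists only so that the
 * BUILD_BUG_ON()s fire at compile time if a structure member ever
 * drifts from the offset the controller firmware expects.  For
 * example, if padding changed the layout of struct io_accel2_cmd,
 *
 *	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
 *
 * would break the build rather than corrupt commands at runtime.
 */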

module_init(hpsa_init);
module_exit(hpsa_cleanup);