/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20    /* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10    /* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000      /* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000       /* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
        "Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
        "Use 'simple mode' rather than 'performant mode'");
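
/*
 * Usage sketch (invocation assumed from the declarations above, not part
 * of the original source): "modprobe hpsa hpsa_simple_mode=1" sets the
 * knob at load time, and because both parameters are S_IRUGO|S_IWUSR they
 * also appear as writable files under /sys/module/hpsa/parameters/.
 */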

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
        {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
        {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
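/*
 * Example drawn from the tables here: the P212 appears in the PCI table
 * above with subsystem vendor ID 0x103C and subsystem device ID 0x3241,
 * so its board_id below is 0x3241103C: subsystem device ID in the upper
 * 16 bits, subsystem vendor ID in the lower 16.
 */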
static struct board_type products[] = {
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324A103C, "Smart Array P712m", &SA5_access},
        {0x324B103C, "Smart Array P711m", &SA5_access},
        {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
        {0x3350103C, "Smart Array P222", &SA5_access},
        {0x3351103C, "Smart Array P420", &SA5_access},
        {0x3352103C, "Smart Array P421", &SA5_access},
        {0x3353103C, "Smart Array P822", &SA5_access},
        {0x3354103C, "Smart Array P420i", &SA5_access},
        {0x3355103C, "Smart Array P220i", &SA5_access},
        {0x3356103C, "Smart Array P721m", &SA5_access},
        {0x1921103C, "Smart Array P830i", &SA5_access},
        {0x1922103C, "Smart Array P430", &SA5_access},
        {0x1923103C, "Smart Array P431", &SA5_access},
        {0x1924103C, "Smart Array P830", &SA5_access},
        {0x1926103C, "Smart Array P731m", &SA5_access},
        {0x1928103C, "Smart Array P230i", &SA5_access},
        {0x1929103C, "Smart Array P530", &SA5_access},
        {0x21BD103C, "Smart Array P244br", &SA5_access},
        {0x21BE103C, "Smart Array P741m", &SA5_access},
        {0x21BF103C, "Smart HBA H240ar", &SA5_access},
        {0x21C0103C, "Smart Array P440ar", &SA5_access},
        {0x21C1103C, "Smart Array P840ar", &SA5_access},
        {0x21C2103C, "Smart Array P440", &SA5_access},
        {0x21C3103C, "Smart Array P441", &SA5_access},
        {0x21C4103C, "Smart Array", &SA5_access},
        {0x21C5103C, "Smart Array P841", &SA5_access},
        {0x21C6103C, "Smart HBA H244br", &SA5_access},
        {0x21C7103C, "Smart HBA H240", &SA5_access},
        {0x21C8103C, "Smart HBA H241", &SA5_access},
        {0x21C9103C, "Smart Array", &SA5_access},
        {0x21CA103C, "Smart Array P246br", &SA5_access},
        {0x21CB103C, "Smart Array P840", &SA5_access},
        {0x21CC103C, "Smart Array", &SA5_access},
        {0x21CD103C, "Smart Array", &SA5_access},
        {0x21CE103C, "Smart HBA", &SA5_access},
        {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
        {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
        {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
        {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
        {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
        void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
        int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
        int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
        u32 *cfg_base_addr, u64 *cfg_base_addr_index,
        u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
        unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
        int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
        u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
        unsigned long *priv = shost_priv(sdev->host);
        return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
        unsigned long *priv = shost_priv(sh);
        return (struct ctlr_info *) *priv;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
        u8 *sense_key, u8 *asc, u8 *ascq)
{
        struct scsi_sense_hdr sshdr;
        bool rc;

        *sense_key = -1;
        *asc = -1;
        *ascq = -1;

        if (sense_data_len < 1)
                return;

        rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
        if (rc) {
                *sense_key = sshdr.sense_key;
                *asc = sshdr.asc;
                *ascq = sshdr.ascq;
        }
}
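
/*
 * Illustrative decode (hypothetical bytes, not taken from this driver):
 * a fixed-format sense buffer beginning
 * 70 00 06 00 00 00 00 0a 00 00 00 00 29 00 yields sense_key 0x06
 * (UNIT ATTENTION), asc 0x29 (power on or reset), ascq 0x00, which
 * check_for_unit_attention() below maps to its POWER_OR_RESET case.
 */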

static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
        u8 sense_key, asc, ascq;
        int sense_len;

        if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
                sense_len = sizeof(c->err_info->SenseInfo);
        else
                sense_len = c->err_info->SenseLen;

        decode_sense_data(c->err_info->SenseInfo, sense_len,
                &sense_key, &asc, &ascq);
        if (sense_key != UNIT_ATTENTION || asc == -1)
                return 0;

        switch (asc) {
        case STATE_CHANGED:
                dev_warn(&h->pdev->dev,
                        HPSA "%d: a state change detected, command retried\n",
                        h->ctlr);
                break;
        case LUN_FAILED:
                dev_warn(&h->pdev->dev,
                        HPSA "%d: LUN failure detected\n", h->ctlr);
                break;
        case REPORT_LUNS_CHANGED:
                dev_warn(&h->pdev->dev,
                        HPSA "%d: report LUN data changed\n", h->ctlr);
        /*
         * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
         * target (array) devices.
         */
                break;
        case POWER_OR_RESET:
                dev_warn(&h->pdev->dev, HPSA "%d: a power on "
                        "or device reset detected\n", h->ctlr);
                break;
        case UNIT_ATTENTION_CLEARED:
                dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
                        "cleared by another initiator\n", h->ctlr);
                break;
        default:
                dev_warn(&h->pdev->dev, HPSA "%d: unknown "
                        "unit attention detected\n", h->ctlr);
                break;
        }
        return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
        if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
                (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
                 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
                return 0;
        dev_warn(&h->pdev->dev, HPSA "device busy");
        return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
        struct device_attribute *attr,
        const char *buf, size_t count)
{
        int status, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &status) != 1)
                return -EINVAL;
        h = shost_to_hba(shost);
        h->acciopath_status = !!status;
        dev_warn(&h->pdev->dev,
                "hpsa: HP SSD Smart Path %s via sysfs update.\n",
                h->acciopath_status ? "enabled" : "disabled");
        return count;
}
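
/*
 * Usage sketch (host number is illustrative): this store method backs
 * the hp_ssd_smart_path_status shost attribute declared further down,
 * so "echo 1 > /sys/class/scsi_host/host0/hp_ssd_smart_path_status"
 * run as root turns ioaccel path usage on.
 */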

static ssize_t host_store_raid_offload_debug(struct device *dev,
        struct device_attribute *attr,
        const char *buf, size_t count)
{
        int debug_level, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &debug_level) != 1)
                return -EINVAL;
        if (debug_level < 0)
                debug_level = 0;
        h = shost_to_hba(shost);
        h->raid_offload_debug = debug_level;
        dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
                h->raid_offload_debug);
        return count;
}

static ssize_t host_store_rescan(struct device *dev,
        struct device_attribute *attr,
        const char *buf, size_t count)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        h = shost_to_hba(shost);
        hpsa_scan_start(h->scsi_host);
        return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        unsigned char *fwrev;

        h = shost_to_hba(shost);
        if (!h->hba_inquiry_data)
                return 0;
        fwrev = &h->hba_inquiry_data[32];
        return snprintf(buf, 20, "%c%c%c%c\n",
                fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ctlr_info *h = shost_to_hba(shost);

        return snprintf(buf, 20, "%d\n",
                atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%s\n",
                h->transMethod & CFGTBL_Trans_Performant ?
                        "performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 30, "HP SSD Smart Path %s\n",
                (h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
        0x3223103C, /* Smart Array P800 */
        0x3234103C, /* Smart Array P400 */
        0x3235103C, /* Smart Array P400i */
        0x3211103C, /* Smart Array E200i */
        0x3212103C, /* Smart Array E200 */
        0x3213103C, /* Smart Array E200i */
        0x3214103C, /* Smart Array E200i */
        0x3215103C, /* Smart Array E200i */
        0x3237103C, /* Smart Array E500 */
        0x323D103C, /* Smart Array P700m */
        0x40800E11, /* Smart Array 5i */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
        0x40800E11, /* Smart Array 5i */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
        /* Exclude 640x boards.  These are two pci devices in one slot
         * which share a battery backed cache module.  One controls the
         * cache, the other accesses the cache through the one that controls
         * it.  If we reset the one controlling the cache, the other will
         * likely not be happy.  Just forbid resetting this conjoined mess.
         * The 640x isn't really supported by hpsa anyway.
         */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
        0x323D103C, /* Smart Array P700m */
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
        int i;

        for (i = 0; i < nelems; i++)
                if (a[i] == board_id)
                        return 1;
        return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
        return !board_id_in_array(unresettable_controller,
                        ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
        return !board_id_in_array(soft_unresettable_controller,
                        ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
        return ctlr_is_hard_resettable(board_id) ||
                ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
        return board_id_in_array(needs_abort_tags_swizzled,
                        ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

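/*
 * Our reading of the test below, stated as an assumption: in the 8-byte
 * CISS LUN address the top two bits of byte 3 carry the address mode,
 * and 0x40 there marks logical-volume addressing.
 */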
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
        return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
        "1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0 0
#define HPSA_RAID_4 1
#define HPSA_RAID_1 2   /* also used for RAID 10 */
#define HPSA_RAID_5 3   /* also used for RAID 50 */
#define HPSA_RAID_51 4
#define HPSA_RAID_6 5   /* also used for RAID 60 */
#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        ssize_t l = 0;
        unsigned char rlevel;
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }

        /* Is this even a logical drive? */
        if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
                spin_unlock_irqrestore(&h->lock, flags);
                l = snprintf(buf, PAGE_SIZE, "N/A\n");
                return l;
        }

        rlevel = hdev->raid_level;
        spin_unlock_irqrestore(&h->lock, flags);
        if (rlevel > RAID_UNKNOWN)
                rlevel = RAID_UNKNOWN;
        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
        return l;
}

static ssize_t lunid_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char lunid[8];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char sn[16];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(sn, hdev->device_id, sizeof(sn));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 16 * 2 + 2,
                "%02X%02X%02X%02X%02X%02X%02X%02X"
                "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                sn[0], sn[1], sn[2], sn[3],
                sn[4], sn[5], sn[6], sn[7],
                sn[8], sn[9], sn[10], sn[11],
                sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        int offload_enabled;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        offload_enabled = hdev->offload_enabled;
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
        host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
        host_show_hp_ssd_smart_path_status,
        host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
        host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
        host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
        host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
        host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
        host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
        &dev_attr_lunid,
        &dev_attr_unique_id,
        &dev_attr_hp_ssd_smart_path_enabled,
        NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
        &dev_attr_rescan,
        &dev_attr_firmware_revision,
        &dev_attr_commands_outstanding,
        &dev_attr_transport_mode,
        &dev_attr_resettable,
        &dev_attr_hp_ssd_smart_path_status,
        &dev_attr_raid_offload_debug,
        NULL,
};

#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
        HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = HPSA,
        .proc_name              = HPSA,
        .queuecommand           = hpsa_scsi_queue_command,
        .scan_start             = hpsa_scan_start,
        .scan_finished          = hpsa_scan_finished,
        .change_queue_depth     = hpsa_change_queue_depth,
        .this_id                = -1,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_abort_handler       = hpsa_eh_abort_handler,
        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
        .ioctl                  = hpsa_ioctl,
        .slave_alloc            = hpsa_slave_alloc,
        .slave_configure        = hpsa_slave_configure,
        .slave_destroy          = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = hpsa_compat_ioctl,
#endif
        .sdev_attrs             = hpsa_sdev_attrs,
        .shost_attrs            = hpsa_shost_attrs,
        .max_sectors            = 8192,
        .no_write_same          = 1,
};

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
        u32 a;
        struct reply_queue_buffer *rq = &h->reply_queue[q];

        if (h->transMethod & CFGTBL_Trans_io_accel1)
                return h->access.command_completed(h, q);

        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return h->access.command_completed(h, q);

        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
                a = rq->head[rq->current_entry];
                rq->current_entry++;
                atomic_dec(&h->commands_outstanding);
        } else {
                a = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (rq->current_entry == h->max_commands) {
                rq->current_entry = 0;
                rq->wraparound ^= 1;
        }
        return a;
}
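
/*
 * A sketch of the invariant behind the parity test above: the controller
 * stamps each posted reply with a low bit equal to its current pass
 * (0 or 1) over the ring.  Whenever the driver consumes the last slot it
 * flips rq->wraparound, so stale entries left over from the previous
 * pass no longer match and read as FIFO_EMPTY until they are overwritten.
 */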

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
        int reply_queue)
{
        if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
                if (unlikely(!h->msix_vector))
                        return;
                if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                        c->Header.ReplyQueue =
                                raw_smp_processor_id() % h->nreply_queues;
                else
                        c->Header.ReplyQueue = reply_queue % h->nreply_queues;
        }
}
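
/*
 * Worked example (hypothetical table contents): if blockFetchTable maps
 * c->Header.SGList == 4 to block fetch entry 2, the OR above yields
 * busaddr bits 1 | (2 << 1) = 0x5: performant-mode bit set, fetch entry
 * 2 in bits 3-1, and command type 0 in bits 6-4.
 */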

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
        struct CommandList *c,
        int reply_queue)
{
        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

        /*
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
        else
                cp->ReplyQueue = reply_queue % h->nreply_queues;
        /*
         * Set the bits in the address sent down to include:
         * - performant mode bit (bit 0)
         * - pull count (bits 1-3)
         * - command type (bits 4-6)
         */
        c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
                IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
        struct CommandList *c,
        int reply_queue)
{
        struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

        /*
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->reply_queue = smp_processor_id() % h->nreply_queues;
        else
                cp->reply_queue = reply_queue % h->nreply_queues;
        /*
         * Set the bits in the address sent down to include:
         * - performant mode bit not used in ioaccel mode 2
         * - pull count (bits 0-3)
         * - command type isn't needed for ioaccel2
         */
        c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
        return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
        struct CommandList *c)
{
        if (!is_firmware_flash_cmd(c->Request.CDB))
                return;
        atomic_inc(&h->firmware_flash_in_progress);
        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
        struct CommandList *c)
{
        if (is_firmware_flash_cmd(c->Request.CDB) &&
                atomic_dec_and_test(&h->firmware_flash_in_progress))
                h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c, int reply_queue)
{
        dial_down_lockup_detection_during_fw_flash(h, c);
        atomic_inc(&h->commands_outstanding);
        switch (c->cmd_type) {
        case CMD_IOACCEL1:
                set_ioaccel1_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
                break;
        case CMD_IOACCEL2:
                set_ioaccel2_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
                break;
        default:
                set_performant_mode(h, c, reply_queue);
                h->access.submit_command(h, c);
        }
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c)
{
        __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
        if (!h->hba_inquiry_data)
                return 0;
        if ((h->hba_inquiry_data[2] & 0x07) == 5)
                return 1;
        return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
        unsigned char scsi3addr[], int bus, int *target, int *lun)
{
        /* finds an unused bus, target, lun for a new physical device
         * assumes h->devlock is held
         */
        int i, found = 0;
        DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

        bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        __set_bit(h->dev[i]->target, lun_taken);
        }

        i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
        if (i < HPSA_MAX_DEVICES) {
                /* *bus = 1; */
                *target = i;
                *lun = 0;
                found = 1;
        }
        return !found;
}

static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
        struct hpsa_scsi_dev_t *dev, char *description)
{
        dev_printk(level, &h->pdev->dev,
                "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
                h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
                description,
                scsi_device_type(dev->devtype),
                dev->vendor,
                dev->model,
                dev->raid_level > RAID_UNKNOWN ?
                        "RAID-?" : raid_label[dev->raid_level],
                dev->offload_config ? '+' : '-',
                dev->offload_enabled ? '+' : '-',
                dev->expose_state);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *device,
        struct hpsa_scsi_dev_t *added[], int *nadded)
{
        /* assumes h->devlock is held */
        int n = h->ndevices;
        int i;
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;

        if (n >= HPSA_MAX_DEVICES) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
        }

        /* physical devices do not have lun or target assigned until now. */
        if (device->lun != -1)
                /* Logical device, lun is already assigned. */
                goto lun_assigned;

        /* If this device is a non-zero lun of a multi-lun device,
         * byte 4 of the 8-byte LUN addr will contain the logical
         * unit no, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
                if (hpsa_find_target_lun(h, device->scsi3addr,
                        device->bus, &device->target, &device->lun) != 0)
                        return -1;
                goto lun_assigned;
        }

        /* This is a non-zero lun of a multi-lun device.
         * Search through our list and find the device which
         * has the same 8 byte LUN address, excepting byte 4.
         * Assign the same bus and target for this new LUN.
         * Use the logical unit number from the firmware.
         */
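        /*
         * Worked example (hypothetical addresses): if an existing entry
         * at bus 0, target 2 has the LUN address 01 00 00 00 00 00 00 00,
         * a new device reporting 01 00 00 00 03 00 00 00 differs only in
         * byte 4, so it inherits bus 0, target 2 and is assigned lun 3.
         */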
1021 memcpy(addr1, device->scsi3addr, 8);
1022 addr1[4] = 0;
1023 for (i = 0; i < n; i++) {
1024 sd = h->dev[i];
1025 memcpy(addr2, sd->scsi3addr, 8);
1026 addr2[4] = 0;
1027 /* differ only in byte 4? */
1028 if (memcmp(addr1, addr2, 8) == 0) {
1029 device->bus = sd->bus;
1030 device->target = sd->target;
1031 device->lun = device->scsi3addr[4];
1032 break;
1033 }
1034 }
1035 if (device->lun == -1) {
1036 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1037 " suspect firmware bug or unsupported hardware "
1038 "configuration.\n");
1039 return -1;
1040 }
1041
1042lun_assigned:
1043
1044 h->dev[n] = device;
1045 h->ndevices++;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001046 device->offload_to_be_enabled = device->offload_enabled;
1047 device->offload_enabled = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001048 added[*nadded] = device;
1049 (*nadded)++;
Webb Scales0d96ef52015-04-23 09:31:55 -05001050 hpsa_show_dev_msg(KERN_INFO, h, device,
1051 device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001052 return 0;
1053}
1054
Scott Teelbd9244f2012-01-19 14:01:30 -06001055/* Update an entry in h->dev[] array. */
1056static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
1057 int entry, struct hpsa_scsi_dev_t *new_entry)
1058{
1059 /* assumes h->devlock is held */
1060 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1061
1062 /* Raid level changed. */
1063 h->dev[entry]->raid_level = new_entry->raid_level;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001064
Don Brace03383732015-01-23 16:43:30 -06001065 /* Raid offload parameters changed. Careful about the ordering. */
1066 if (new_entry->offload_config && new_entry->offload_enabled) {
1067 /*
1068 * if drive is newly offload_enabled, we want to copy the
1069 * raid map data first. If previously offload_enabled and
1070 * offload_config were set, raid map data had better be
1071 * the same as it was before. if raid map data is changed
1072 * then it had better be the case that
1073 * h->dev[entry]->offload_enabled is currently 0.
1074 */
1075 h->dev[entry]->raid_map = new_entry->raid_map;
1076 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
Don Brace03383732015-01-23 16:43:30 -06001077 }
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001078 h->dev[entry]->offload_config = new_entry->offload_config;
Stephen M. Cameron9fb0de22014-02-18 13:56:50 -06001079 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
Don Brace03383732015-01-23 16:43:30 -06001080 h->dev[entry]->queue_depth = new_entry->queue_depth;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001081
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001082 /*
1083 * We can turn off ioaccel offload now, but need to delay turning
1084 * it on until we can update h->dev[entry]->phys_disk[], but we
1085 * can't do that until all the devices are updated.
1086 */
1087 h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1088 if (!new_entry->offload_enabled)
1089 h->dev[entry]->offload_enabled = 0;
1090
Webb Scales0d96ef52015-04-23 09:31:55 -05001091 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
Scott Teelbd9244f2012-01-19 14:01:30 -06001092}
1093
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001094/* Replace an entry from h->dev[] array. */
1095static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
1096 int entry, struct hpsa_scsi_dev_t *new_entry,
1097 struct hpsa_scsi_dev_t *added[], int *nadded,
1098 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1099{
1100 /* assumes h->devlock is held */
Scott Teelcfe5bad2011-10-26 16:21:07 -05001101 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001102 removed[*nremoved] = h->dev[entry];
1103 (*nremoved)++;
Stephen M. Cameron01350d02011-08-09 08:18:01 -05001104
1105 /*
1106 * New physical devices won't have target/lun assigned yet
1107 * so we need to preserve the values in the slot we are replacing.
1108 */
1109 if (new_entry->target == -1) {
1110 new_entry->target = h->dev[entry]->target;
1111 new_entry->lun = h->dev[entry]->lun;
1112 }
1113
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001114 new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1115 new_entry->offload_enabled = 0;
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001116 h->dev[entry] = new_entry;
1117 added[*nadded] = new_entry;
1118 (*nadded)++;
Webb Scales0d96ef52015-04-23 09:31:55 -05001119 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001120}
1121
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001122/* Remove an entry from h->dev[] array. */
1123static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
1124 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1125{
1126 /* assumes h->devlock is held */
1127 int i;
1128 struct hpsa_scsi_dev_t *sd;
1129
Scott Teelcfe5bad2011-10-26 16:21:07 -05001130 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001131
1132 sd = h->dev[entry];
1133 removed[*nremoved] = h->dev[entry];
1134 (*nremoved)++;
1135
1136 for (i = entry; i < h->ndevices-1; i++)
1137 h->dev[i] = h->dev[i+1];
1138 h->ndevices--;
Webb Scales0d96ef52015-04-23 09:31:55 -05001139 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001140}
1141
1142#define SCSI3ADDR_EQ(a, b) ( \
1143 (a)[7] == (b)[7] && \
1144 (a)[6] == (b)[6] && \
1145 (a)[5] == (b)[5] && \
1146 (a)[4] == (b)[4] && \
1147 (a)[3] == (b)[3] && \
1148 (a)[2] == (b)[2] && \
1149 (a)[1] == (b)[1] && \
1150 (a)[0] == (b)[0])
1151
1152static void fixup_botched_add(struct ctlr_info *h,
1153 struct hpsa_scsi_dev_t *added)
1154{
1155 /* called when scsi_add_device fails in order to re-adjust
1156 * h->dev[] to match the mid layer's view.
1157 */
1158 unsigned long flags;
1159 int i, j;
1160
1161 spin_lock_irqsave(&h->lock, flags);
1162 for (i = 0; i < h->ndevices; i++) {
1163 if (h->dev[i] == added) {
1164 for (j = i; j < h->ndevices-1; j++)
1165 h->dev[j] = h->dev[j+1];
1166 h->ndevices--;
1167 break;
1168 }
1169 }
1170 spin_unlock_irqrestore(&h->lock, flags);
1171 kfree(added);
1172}
1173
1174static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1175 struct hpsa_scsi_dev_t *dev2)
1176{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001177 /* we compare everything except lun and target as these
1178 * are not yet assigned. Compare parts likely
1179 * to differ first
1180 */
1181 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1182 sizeof(dev1->scsi3addr)) != 0)
1183 return 0;
1184 if (memcmp(dev1->device_id, dev2->device_id,
1185 sizeof(dev1->device_id)) != 0)
1186 return 0;
1187 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1188 return 0;
1189 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1190 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001191 if (dev1->devtype != dev2->devtype)
1192 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001193 if (dev1->bus != dev2->bus)
1194 return 0;
1195 return 1;
1196}
1197
Scott Teelbd9244f2012-01-19 14:01:30 -06001198static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1199 struct hpsa_scsi_dev_t *dev2)
1200{
1201 /* Device attributes that can change, but don't mean
1202 * that the device is a different device, nor that the OS
1203 * needs to be told anything about the change.
1204 */
1205 if (dev1->raid_level != dev2->raid_level)
1206 return 1;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001207 if (dev1->offload_config != dev2->offload_config)
1208 return 1;
1209 if (dev1->offload_enabled != dev2->offload_enabled)
1210 return 1;
Don Brace03383732015-01-23 16:43:30 -06001211 if (dev1->queue_depth != dev2->queue_depth)
1212 return 1;
Scott Teelbd9244f2012-01-19 14:01:30 -06001213 return 0;
1214}
1215
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001216/* Find needle in haystack. If exact match found, return DEVICE_SAME,
1217 * and return needle location in *index. If scsi3addr matches, but not
1218 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
Scott Teelbd9244f2012-01-19 14:01:30 -06001219 * location in *index.
1220 * In the case of a minor device attribute change, such as RAID level, just
1221 * return DEVICE_UPDATED, along with the updated device's location in index.
1222 * If needle not found, return DEVICE_NOT_FOUND.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001223 */
1224static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1225 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1226 int *index)
1227{
1228 int i;
1229#define DEVICE_NOT_FOUND 0
1230#define DEVICE_CHANGED 1
1231#define DEVICE_SAME 2
Scott Teelbd9244f2012-01-19 14:01:30 -06001232#define DEVICE_UPDATED 3
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001233 for (i = 0; i < haystack_size; i++) {
Stephen M. Cameron23231042010-02-04 08:43:36 -06001234 if (haystack[i] == NULL) /* previously removed. */
1235 continue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001236 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1237 *index = i;
Scott Teelbd9244f2012-01-19 14:01:30 -06001238 if (device_is_the_same(needle, haystack[i])) {
1239 if (device_updated(needle, haystack[i]))
1240 return DEVICE_UPDATED;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001241 return DEVICE_SAME;
Scott Teelbd9244f2012-01-19 14:01:30 -06001242 } else {
Stephen M. Cameron98465902014-02-21 16:25:00 -06001243 /* Keep offline devices offline */
1244 if (needle->volume_offline)
1245 return DEVICE_NOT_FOUND;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001246 return DEVICE_CHANGED;
Scott Teelbd9244f2012-01-19 14:01:30 -06001247 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001248 }
1249 }
1250 *index = -1;
1251 return DEVICE_NOT_FOUND;
1252}
1253
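/* Remember an offline volume so it can be polled later and brought
 * online once it becomes ready; an address already on the list is
 * not added twice.
 */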
Stephen M. Cameron98465902014-02-21 16:25:00 -06001254static void hpsa_monitor_offline_device(struct ctlr_info *h,
1255 unsigned char scsi3addr[])
1256{
1257 struct offline_device_entry *device;
1258 unsigned long flags;
1259
1260 /* Check to see if device is already on the list */
1261 spin_lock_irqsave(&h->offline_device_lock, flags);
1262 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1263 if (memcmp(device->scsi3addr, scsi3addr,
1264 sizeof(device->scsi3addr)) == 0) {
1265 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1266 return;
1267 }
1268 }
1269 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1270
1271 /* Device is not on the list, add it. */
1272 device = kmalloc(sizeof(*device), GFP_KERNEL);
1273 if (!device) {
1274 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1275 return;
1276 }
1277 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1278 spin_lock_irqsave(&h->offline_device_lock, flags);
1279 list_add_tail(&device->offline_list, &h->offline_device_list);
1280 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1281}
1282
1283/* Print a message explaining various offline volume states */
1284static void hpsa_show_volume_status(struct ctlr_info *h,
1285 struct hpsa_scsi_dev_t *sd)
1286{
1287 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1288 dev_info(&h->pdev->dev,
1289 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1290 h->scsi_host->host_no,
1291 sd->bus, sd->target, sd->lun);
1292 switch (sd->volume_offline) {
1293 case HPSA_LV_OK:
1294 break;
1295 case HPSA_LV_UNDERGOING_ERASE:
1296 dev_info(&h->pdev->dev,
1297 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1298 h->scsi_host->host_no,
1299 sd->bus, sd->target, sd->lun);
1300 break;
1301 case HPSA_LV_UNDERGOING_RPI:
1302 dev_info(&h->pdev->dev,
1303 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1304 h->scsi_host->host_no,
1305 sd->bus, sd->target, sd->lun);
1306 break;
1307 case HPSA_LV_PENDING_RPI:
1308 dev_info(&h->pdev->dev,
1309 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1310 h->scsi_host->host_no,
1311 sd->bus, sd->target, sd->lun);
1312 break;
1313 case HPSA_LV_ENCRYPTED_NO_KEY:
1314 dev_info(&h->pdev->dev,
1315 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1316 h->scsi_host->host_no,
1317 sd->bus, sd->target, sd->lun);
1318 break;
1319 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1320 dev_info(&h->pdev->dev,
1321 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1322 h->scsi_host->host_no,
1323 sd->bus, sd->target, sd->lun);
1324 break;
1325 case HPSA_LV_UNDERGOING_ENCRYPTION:
1326 dev_info(&h->pdev->dev,
1327 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1328 h->scsi_host->host_no,
1329 sd->bus, sd->target, sd->lun);
1330 break;
1331 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1332 dev_info(&h->pdev->dev,
1333 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1334 h->scsi_host->host_no,
1335 sd->bus, sd->target, sd->lun);
1336 break;
1337 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1338 dev_info(&h->pdev->dev,
1339 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1340 h->scsi_host->host_no,
1341 sd->bus, sd->target, sd->lun);
1342 break;
1343 case HPSA_LV_PENDING_ENCRYPTION:
1344 dev_info(&h->pdev->dev,
1345 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1346 h->scsi_host->host_no,
1347 sd->bus, sd->target, sd->lun);
1348 break;
1349 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1350 dev_info(&h->pdev->dev,
1351 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1352 h->scsi_host->host_no,
1353 sd->bus, sd->target, sd->lun);
1354 break;
1355 }
1356}
1357
Don Brace03383732015-01-23 16:43:30 -06001358/*
1359 * Figure the list of physical drive pointers for a logical drive with
1360 * raid offload configured.
1361 */
1362static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1363 struct hpsa_scsi_dev_t *dev[], int ndevices,
1364 struct hpsa_scsi_dev_t *logical_drive)
1365{
1366 struct raid_map_data *map = &logical_drive->raid_map;
1367 struct raid_map_disk_data *dd = &map->data[0];
1368 int i, j;
1369 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1370 le16_to_cpu(map->metadata_disks_per_row);
1371 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1372 le16_to_cpu(map->layout_map_count) *
1373 total_disks_per_row;
1374 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1375 total_disks_per_row;
1376 int qdepth;
1377
1378 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1379 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1380
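 /* Walk the RAID map, resolving each entry's ioaccel_handle to the
 * matching physical disk in dev[], and accumulate a queue depth from
 * the member disks, capped at the controller's command pool size.
 */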
1381 qdepth = 0;
1382 for (i = 0; i < nraid_map_entries; i++) {
1383 logical_drive->phys_disk[i] = NULL;
1384 if (!logical_drive->offload_config)
1385 continue;
1386 for (j = 0; j < ndevices; j++) {
1387 if (dev[j]->devtype != TYPE_DISK)
1388 continue;
1389 if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1390 continue;
1391 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1392 continue;
1393
1394 logical_drive->phys_disk[i] = dev[j];
1395 if (i < nphys_disk)
1396 qdepth = min(h->nr_cmds, qdepth +
1397 logical_drive->phys_disk[i]->queue_depth);
1398 break;
1399 }
1400
1401 /*
1402 * This can happen if a physical drive is removed and
1403 * the logical drive is degraded. In that case, the RAID
1404 * map data will refer to a physical disk which isn't actually
1405 * present. And in that case offload_enabled should already
1406 * be 0, but we'll turn it off here just in case
1407 */
1408 if (!logical_drive->phys_disk[i]) {
1409 logical_drive->offload_enabled = 0;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001410 logical_drive->offload_to_be_enabled = 0;
1411 logical_drive->queue_depth = 8;
Don Brace03383732015-01-23 16:43:30 -06001412 }
1413 }
1414 if (nraid_map_entries)
1415 /*
1416 * This is correct for reads, too high for full stripe writes,
1417 * way too high for partial stripe writes
1418 */
1419 logical_drive->queue_depth = qdepth;
1420 else
1421 logical_drive->queue_depth = h->nr_cmds;
1422}
1423
1424static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1425 struct hpsa_scsi_dev_t *dev[], int ndevices)
1426{
1427 int i;
1428
1429 for (i = 0; i < ndevices; i++) {
1430 if (dev[i]->devtype != TYPE_DISK)
1431 continue;
1432 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1433 continue;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001434
1435 /*
1436 * If offload is currently enabled, the RAID map and
1437 * phys_disk[] assignment *better* not be changing
1438 * and since it isn't changing, we do not need to
1439 * update it.
1440 */
1441 if (dev[i]->offload_enabled)
1442 continue;
1443
Don Brace03383732015-01-23 16:43:30 -06001444 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1445 }
1446}
1447
Stephen M. Cameron4967bd32010-02-04 08:41:49 -06001448static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001449 struct hpsa_scsi_dev_t *sd[], int nsds)
1450{
1451 /* sd contains scsi3 addresses and devtypes, and inquiry
1452 * data. This function takes what's in sd to be the current
1453 * reality and updates h->dev[] to reflect that reality.
1454 */
1455 int i, entry, device_change, changes = 0;
1456 struct hpsa_scsi_dev_t *csd;
1457 unsigned long flags;
1458 struct hpsa_scsi_dev_t **added, **removed;
1459 int nadded, nremoved;
1460 struct Scsi_Host *sh = NULL;
1461
Scott Teelcfe5bad2011-10-26 16:21:07 -05001462 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1463 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001464
1465 if (!added || !removed) {
1466 dev_warn(&h->pdev->dev,
1467 "out of memory in adjust_hpsa_scsi_table\n");
1468 goto free_and_out;
1469 }
1470
1471 spin_lock_irqsave(&h->devlock, flags);
1472
1473 /* find any devices in h->dev[] that are not in
1474 * sd[] and remove them from h->dev[], and for any
1475 * devices which have changed, remove the old device
1476 * info and add the new device info.
Scott Teelbd9244f2012-01-19 14:01:30 -06001477 * If minor device attributes change, just update
1478 * the existing device structure.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001479 */
1480 i = 0;
1481 nremoved = 0;
1482 nadded = 0;
1483 while (i < h->ndevices) {
1484 csd = h->dev[i];
1485 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1486 if (device_change == DEVICE_NOT_FOUND) {
1487 changes++;
1488 hpsa_scsi_remove_entry(h, hostno, i,
1489 removed, &nremoved);
1490 continue; /* remove ^^^, hence i not incremented */
1491 } else if (device_change == DEVICE_CHANGED) {
1492 changes++;
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001493 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1494 added, &nadded, removed, &nremoved);
Stephen M. Cameronc7f172d2010-02-04 08:43:31 -06001495 /* Set it to NULL to prevent it from being freed
1496 * at the bottom of hpsa_update_scsi_devices()
1497 */
1498 sd[entry] = NULL;
Scott Teelbd9244f2012-01-19 14:01:30 -06001499 } else if (device_change == DEVICE_UPDATED) {
1500 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001501 }
1502 i++;
1503 }
1504
1505 /* Now, make sure every device listed in sd[] is also
1506 * listed in h->dev[], adding them if they aren't found
1507 */
1508
1509 for (i = 0; i < nsds; i++) {
1510 if (!sd[i]) /* if already added above. */
1511 continue;
Stephen M. Cameron98465902014-02-21 16:25:00 -06001512
1513 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1514 * as the SCSI mid-layer does not handle such devices well.
1515 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1516 * at 160Hz, and prevents the system from coming up.
1517 */
1518 if (sd[i]->volume_offline) {
1519 hpsa_show_volume_status(h, sd[i]);
Webb Scales0d96ef52015-04-23 09:31:55 -05001520 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
Stephen M. Cameron98465902014-02-21 16:25:00 -06001521 continue;
1522 }
1523
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001524 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1525 h->ndevices, &entry);
1526 if (device_change == DEVICE_NOT_FOUND) {
1527 changes++;
1528 if (hpsa_scsi_add_entry(h, hostno, sd[i],
1529 added, &nadded) != 0)
1530 break;
1531 sd[i] = NULL; /* prevent from being freed later. */
1532 } else if (device_change == DEVICE_CHANGED) {
1533 /* should never happen... */
1534 changes++;
1535 dev_warn(&h->pdev->dev,
1536 "device unexpectedly changed.\n");
1537 /* but if it does happen, we just ignore that device */
1538 }
1539 }
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001540 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1541
1542 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1543 * any logical drives that need it enabled.
1544 */
1545 for (i = 0; i < h->ndevices; i++)
1546 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1547
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001548 spin_unlock_irqrestore(&h->devlock, flags);
1549
Stephen M. Cameron98465902014-02-21 16:25:00 -06001550 /* Monitor devices which are in one of several NOT READY states to be
1551 * brought online later. This must be done without holding h->devlock,
1552 * so don't touch h->dev[]
1553 */
1554 for (i = 0; i < nsds; i++) {
1555 if (!sd[i]) /* if already added above. */
1556 continue;
1557 if (sd[i]->volume_offline)
1558 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1559 }
1560
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001561 /* Don't notify scsi mid layer of any changes the first time through
1562 * (or if there are no changes) scsi_scan_host will do it later the
1563 * first time through.
1564 */
1565 if (hostno == -1 || !changes)
1566 goto free_and_out;
1567
1568 sh = h->scsi_host;
1569 /* Notify scsi mid layer of any removed devices */
1570 for (i = 0; i < nremoved; i++) {
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001571 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1572 struct scsi_device *sdev =
1573 scsi_device_lookup(sh, removed[i]->bus,
1574 removed[i]->target, removed[i]->lun);
1575 if (sdev != NULL) {
1576 scsi_remove_device(sdev);
1577 scsi_device_put(sdev);
1578 } else {
1579 /*
1580 * We don't expect to get here.
1581 * future cmds to this device will get selection
1582 * timeout as if the device was gone.
1583 */
Webb Scales0d96ef52015-04-23 09:31:55 -05001584 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1585 "didn't find device for removal.");
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001586 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001587 }
1588 kfree(removed[i]);
1589 removed[i] = NULL;
1590 }
1591
1592 /* Notify scsi mid layer of any added devices */
1593 for (i = 0; i < nadded; i++) {
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001594 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1595 continue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001596 if (scsi_add_device(sh, added[i]->bus,
1597 added[i]->target, added[i]->lun) == 0)
1598 continue;
Webb Scales0d96ef52015-04-23 09:31:55 -05001599 hpsa_show_dev_msg(KERN_WARNING, h, added[i],
1600 "addition failed, device not added.");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001601 /* now we have to remove it from h->dev,
1602 * since it didn't get added to scsi mid layer
1603 */
1604 fixup_botched_add(h, added[i]);
1605 }
1606
1607free_and_out:
1608 kfree(added);
1609 kfree(removed);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001610}
1611
1612/*
Joe Perches9e03aa22013-09-03 13:45:58 -07001613 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001614 * Assumes h->devlock is held.
1615 */
1616static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1617 int bus, int target, int lun)
1618{
1619 int i;
1620 struct hpsa_scsi_dev_t *sd;
1621
1622 for (i = 0; i < h->ndevices; i++) {
1623 sd = h->dev[i];
1624 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1625 return sd;
1626 }
1627 return NULL;
1628}
1629
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001630static int hpsa_slave_alloc(struct scsi_device *sdev)
1631{
1632 struct hpsa_scsi_dev_t *sd;
1633 unsigned long flags;
1634 struct ctlr_info *h;
1635
1636 h = sdev_to_hba(sdev);
1637 spin_lock_irqsave(&h->devlock, flags);
1638 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1639 sdev_id(sdev), sdev->lun);
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001640 if (likely(sd)) {
Don Brace03383732015-01-23 16:43:30 -06001641 atomic_set(&sd->ioaccel_cmds_out, 0);
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001642 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1643 } else
1644 sdev->hostdata = NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001645 spin_unlock_irqrestore(&h->devlock, flags);
1646 return 0;
1647}
1648
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001649/* configure scsi device based on internal per-device structure */
1650static int hpsa_slave_configure(struct scsi_device *sdev)
1651{
1652 struct hpsa_scsi_dev_t *sd;
1653 int queue_depth;
1654
1655 sd = sdev->hostdata;
1656 sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1657
1658 if (sd)
1659 queue_depth = sd->queue_depth != 0 ?
1660 sd->queue_depth : sdev->host->can_queue;
1661 else
1662 queue_depth = sdev->host->can_queue;
1663
1664 scsi_change_queue_depth(sdev, queue_depth);
1665
1666 return 0;
1667}
1668
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001669static void hpsa_slave_destroy(struct scsi_device *sdev)
1670{
Stephen M. Cameronbcc44252010-02-04 08:41:54 -06001671 /* nothing to do. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001672}
1673
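/* SG chain block helpers: each command gets a spare array of
 * h->chainsize descriptors for requests that need more SG entries
 * than fit in the command itself.
 */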
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001674static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1675{
1676 int i;
1677
1678 if (!h->cmd_sg_list)
1679 return;
1680 for (i = 0; i < h->nr_cmds; i++) {
1681 kfree(h->cmd_sg_list[i]);
1682 h->cmd_sg_list[i] = NULL;
1683 }
1684 kfree(h->cmd_sg_list);
1685 h->cmd_sg_list = NULL;
1686}
1687
1688static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1689{
1690 int i;
1691
1692 if (h->chainsize <= 0)
1693 return 0;
1694
1695 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1696 GFP_KERNEL);
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001697 if (!h->cmd_sg_list) {
1698 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001699 return -ENOMEM;
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001700 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001701 for (i = 0; i < h->nr_cmds; i++) {
1702 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1703 h->chainsize, GFP_KERNEL);
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001704 if (!h->cmd_sg_list[i]) {
1705 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001706 goto clean;
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001707 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001708 }
1709 return 0;
1710
1711clean:
1712 hpsa_free_sg_chain_blocks(h);
1713 return -ENOMEM;
1714}
1715
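/* DMA-map a command's external SG chain block and point the last
 * embedded SG entry at it (flagged with HPSA_SG_CHAIN).
 */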
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001716static int hpsa_map_sg_chain_block(struct ctlr_info *h,
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001717 struct CommandList *c)
1718{
1719 struct SGDescriptor *chain_sg, *chain_block;
1720 u64 temp64;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001721 u32 chain_len;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001722
1723 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1724 chain_block = h->cmd_sg_list[c->cmdindex];
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001725 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1726 chain_len = sizeof(*chain_sg) *
Don Brace2b08b3e2015-01-23 16:41:09 -06001727 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001728 chain_sg->Len = cpu_to_le32(chain_len);
1729 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001730 PCI_DMA_TODEVICE);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001731 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1732 /* prevent subsequent unmapping */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001733 chain_sg->Addr = cpu_to_le64(0);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001734 return -1;
1735 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001736 chain_sg->Addr = cpu_to_le64(temp64);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001737 return 0;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001738}
1739
1740static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1741 struct CommandList *c)
1742{
1743 struct SGDescriptor *chain_sg;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001744
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001745 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001746 return;
1747
1748 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001749 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1750 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001751}
1752
Scott Teela09c1442014-02-18 13:57:21 -06001753
1754/* Decode the various types of errors on ioaccel2 path.
1755 * Return 1 for any error that should generate a RAID path retry.
1756 * Return 0 for errors that don't require a RAID path retry.
1757 */
1758static int handle_ioaccel_mode2_error(struct ctlr_info *h,
Scott Teelc3497752014-02-18 13:56:34 -06001759 struct CommandList *c,
1760 struct scsi_cmnd *cmd,
1761 struct io_accel2_cmd *c2)
1762{
1763 int data_len;
Scott Teela09c1442014-02-18 13:57:21 -06001764 int retry = 0;
Scott Teelc3497752014-02-18 13:56:34 -06001765
1766 switch (c2->error_data.serv_response) {
1767 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1768 switch (c2->error_data.status) {
1769 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1770 break;
1771 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1772 dev_warn(&h->pdev->dev,
1773 "%s: task complete with check condition.\n",
1774 "HP SSD Smart Path");
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001775 cmd->result |= SAM_STAT_CHECK_CONDITION;
Scott Teelc3497752014-02-18 13:56:34 -06001776 if (c2->error_data.data_present !=
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001777 IOACCEL2_SENSE_DATA_PRESENT) {
1778 memset(cmd->sense_buffer, 0,
1779 SCSI_SENSE_BUFFERSIZE);
Scott Teelc3497752014-02-18 13:56:34 -06001780 break;
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001781 }
Scott Teelc3497752014-02-18 13:56:34 -06001782 /* copy the sense data */
1783 data_len = c2->error_data.sense_data_len;
1784 if (data_len > SCSI_SENSE_BUFFERSIZE)
1785 data_len = SCSI_SENSE_BUFFERSIZE;
1786 if (data_len > sizeof(c2->error_data.sense_data_buff))
1787 data_len =
1788 sizeof(c2->error_data.sense_data_buff);
1789 memcpy(cmd->sense_buffer,
1790 c2->error_data.sense_data_buff, data_len);
Scott Teela09c1442014-02-18 13:57:21 -06001791 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001792 break;
1793 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1794 dev_warn(&h->pdev->dev,
1795 "%s: task complete with BUSY status.\n",
1796 "HP SSD Smart Path");
Scott Teela09c1442014-02-18 13:57:21 -06001797 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001798 break;
1799 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1800 dev_warn(&h->pdev->dev,
1801 "%s: task complete with reservation conflict.\n",
1802 "HP SSD Smart Path");
Scott Teela09c1442014-02-18 13:57:21 -06001803 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001804 break;
1805 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1806 /* Make scsi midlayer do unlimited retries */
1807 cmd->result = DID_IMM_RETRY << 16;
1808 break;
1809 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1810 dev_warn(&h->pdev->dev,
1811 "%s: task complete with aborted status.\n",
1812 "HP SSD Smart Path");
Scott Teela09c1442014-02-18 13:57:21 -06001813 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001814 break;
1815 default:
1816 dev_warn(&h->pdev->dev,
1817 "%s: task complete with unrecognized status: 0x%02x\n",
1818 "HP SSD Smart Path", c2->error_data.status);
Scott Teela09c1442014-02-18 13:57:21 -06001819 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001820 break;
1821 }
1822 break;
1823 case IOACCEL2_SERV_RESPONSE_FAILURE:
1824 /* don't expect to get here. */
1825 dev_warn(&h->pdev->dev,
1826 "unexpected delivery or target failure, status = 0x%02x\n",
1827 c2->error_data.status);
Scott Teela09c1442014-02-18 13:57:21 -06001828 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001829 break;
1830 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1831 break;
1832 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1833 break;
1834 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1835 dev_warn(&h->pdev->dev, "task management function rejected.\n");
Scott Teela09c1442014-02-18 13:57:21 -06001836 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001837 break;
1838 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1839 dev_warn(&h->pdev->dev, "task management function: invalid LUN\n");
1840 break;
1841 default:
1842 dev_warn(&h->pdev->dev,
1843 "%s: Unrecognized server response: 0x%02x\n",
Scott Teela09c1442014-02-18 13:57:21 -06001844 "HP SSD Smart Path",
1845 c2->error_data.serv_response);
1846 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001847 break;
1848 }
Scott Teela09c1442014-02-18 13:57:21 -06001849
1850 return retry; /* retry on raid path? */
Scott Teelc3497752014-02-18 13:56:34 -06001851}
1852
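/* Complete an ioaccel2 command: finish immediately on clean status;
 * on error, either complete with the translated result or resubmit
 * down the normal RAID path via the resubmit workqueue.
 */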
1853static void process_ioaccel2_completion(struct ctlr_info *h,
1854 struct CommandList *c, struct scsi_cmnd *cmd,
1855 struct hpsa_scsi_dev_t *dev)
1856{
1857 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1858
1859 /* check for good status */
1860 if (likely(c2->error_data.serv_response == 0 &&
1861 c2->error_data.status == 0)) {
1862 cmd_free(h, c);
1863 cmd->scsi_done(cmd);
1864 return;
1865 }
1866
1867 /* Any RAID offload error results in retry which will use
1868 * the normal I/O path so the controller can handle whatever's
1869 * wrong.
1870 */
1871 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1872 c2->error_data.serv_response ==
1873 IOACCEL2_SERV_RESPONSE_FAILURE) {
Don Brace080ef1c2015-01-23 16:43:25 -06001874 if (c2->error_data.status ==
1875 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1876 dev->offload_enabled = 0;
1877 goto retry_cmd;
Scott Teelc3497752014-02-18 13:56:34 -06001878 }
Don Brace080ef1c2015-01-23 16:43:25 -06001879
1880 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
1881 goto retry_cmd;
1882
Scott Teelc3497752014-02-18 13:56:34 -06001883 cmd_free(h, c);
1884 cmd->scsi_done(cmd);
Don Brace080ef1c2015-01-23 16:43:25 -06001885 return;
1886
1887retry_cmd:
1888 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
1889 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
Scott Teelc3497752014-02-18 13:56:34 -06001890}
1891
Stephen Cameron9437ac42015-04-23 09:32:16 -05001892/* Returns 0 on success, < 0 otherwise. */
1893static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
1894 struct CommandList *cp)
1895{
1896 u8 tmf_status = cp->err_info->ScsiStatus;
1897
1898 switch (tmf_status) {
1899 case CISS_TMF_COMPLETE:
1900 /*
1901 * CISS_TMF_COMPLETE never happens, instead,
1902 * ei->CommandStatus == 0 for this case.
1903 */
1904 case CISS_TMF_SUCCESS:
1905 return 0;
1906 case CISS_TMF_INVALID_FRAME:
1907 case CISS_TMF_NOT_SUPPORTED:
1908 case CISS_TMF_FAILED:
1909 case CISS_TMF_WRONG_LUN:
1910 case CISS_TMF_OVERLAPPED_TAG:
1911 break;
1912 default:
1913 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
1914 tmf_status);
1915 break;
1916 }
1917 return -tmf_status;
1918}
1919
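/* Command completion path: unmap DMA, translate controller/ioaccel
 * status into a SCSI midlayer result, and finish via cmd->scsi_done.
 */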
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05001920static void complete_scsi_command(struct CommandList *cp)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001921{
1922 struct scsi_cmnd *cmd;
1923 struct ctlr_info *h;
1924 struct ErrorInfo *ei;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001925 struct hpsa_scsi_dev_t *dev;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001926
Stephen Cameron9437ac42015-04-23 09:32:16 -05001927 u8 sense_key;
1928 u8 asc; /* additional sense code */
1929 u8 ascq; /* additional sense code qualifier */
Stephen M. Camerondb111e12011-06-03 09:57:34 -05001930 unsigned long sense_data_size;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001931
1932 ei = cp->err_info;
Stephen Cameron7fa30302015-01-23 16:44:30 -06001933 cmd = cp->scsi_cmd;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001934 h = cp->h;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001935 dev = cmd->device->hostdata;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001936
1937 scsi_dma_unmap(cmd); /* undo the DMA mappings */
Matt Gatese1f7de02014-02-18 13:55:17 -06001938 if ((cp->cmd_type == CMD_SCSI) &&
Don Brace2b08b3e2015-01-23 16:41:09 -06001939 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001940 hpsa_unmap_sg_chain_block(h, cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001941
1942 cmd->result = (DID_OK << 16); /* host byte */
1943 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
Scott Teelc3497752014-02-18 13:56:34 -06001944
Don Brace03383732015-01-23 16:43:30 -06001945 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
1946 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1947
Webb Scales25163bd2015-04-23 09:32:00 -05001948 /*
1949 * We check for lockup status here as it may be set for
1950 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
1951 * fail_all_outstanding_cmds()
1952 */
1953 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
1954 /* DID_NO_CONNECT will prevent a retry */
1955 cmd->result = DID_NO_CONNECT << 16;
1956 cmd_free(h, cp);
1957 cmd->scsi_done(cmd);
1958 return;
1959 }
1960
Scott Teelc3497752014-02-18 13:56:34 -06001961 if (cp->cmd_type == CMD_IOACCEL2)
1962 return process_ioaccel2_completion(h, cp, cmd, dev);
1963
Robert Elliott6aa4c362014-07-03 10:18:19 -05001964 scsi_set_resid(cmd, ei->ResidualCnt);
1965 if (ei->CommandStatus == 0) {
Robert Elliott6aa4c362014-07-03 10:18:19 -05001968 cmd_free(h, cp);
1969 cmd->scsi_done(cmd);
1970 return;
1971 }
1972
Matt Gatese1f7de02014-02-18 13:55:17 -06001973 /* For I/O accelerator commands, copy over some fields to the normal
1974 * CISS header used below for error handling.
1975 */
1976 if (cp->cmd_type == CMD_IOACCEL1) {
1977 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
Don Brace2b08b3e2015-01-23 16:41:09 -06001978 cp->Header.SGList = scsi_sg_count(cmd);
1979 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
1980 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
1981 IOACCEL1_IOFLAGS_CDBLEN_MASK;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001982 cp->Header.tag = c->tag;
Matt Gatese1f7de02014-02-18 13:55:17 -06001983 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1984 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001985
1986 /* Any RAID offload error results in retry which will use
1987 * the normal I/O path so the controller can handle whatever's
1988 * wrong.
1989 */
1990 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1991 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1992 dev->offload_enabled = 0;
Don Brace080ef1c2015-01-23 16:43:25 -06001993 INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
1994 queue_work_on(raw_smp_processor_id(),
1995 h->resubmit_wq, &cp->work);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001996 return;
1997 }
Matt Gatese1f7de02014-02-18 13:55:17 -06001998 }
1999
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002000 /* an error has occurred */
2001 switch (ei->CommandStatus) {
2002
2003 case CMD_TARGET_STATUS:
Stephen Cameron9437ac42015-04-23 09:32:16 -05002004 cmd->result |= ei->ScsiStatus;
2005 /* copy the sense data */
2006 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2007 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2008 else
2009 sense_data_size = sizeof(ei->SenseInfo);
2010 if (ei->SenseLen < sense_data_size)
2011 sense_data_size = ei->SenseLen;
2012 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2013 if (ei->ScsiStatus)
2014 decode_sense_data(ei->SenseInfo, sense_data_size,
2015 &sense_key, &asc, &ascq);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002016 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
Matt Gates1d3b3602010-02-04 08:43:00 -06002017 if (sense_key == ABORTED_COMMAND) {
Stephen M. Cameron2e311fb2013-09-23 13:33:41 -05002018 cmd->result |= DID_SOFT_ERROR << 16;
Matt Gates1d3b3602010-02-04 08:43:00 -06002019 break;
2020 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002021 break;
2022 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002023 /* Problem was not a check condition
2024 * Pass it up to the upper layers...
2025 */
2026 if (ei->ScsiStatus) {
2027 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2028 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2029 "Returning result: 0x%x\n",
2030 cp, ei->ScsiStatus,
2031 sense_key, asc, ascq,
2032 cmd->result);
2033 } else { /* scsi status is zero??? How??? */
2034 dev_warn(&h->pdev->dev,
2035 "cp %p SCSI status was 0. Returning no connection.\n", cp);
2036
2037 /* Ordinarily, this case should never happen,
2038 * but there is a bug in some released firmware
2039 * revisions that allows it to happen if, for
2040 * example, a 4100 backplane loses power and
2041 * the tape drive is in it. We assume that
2042 * it's a fatal error of some kind because we
2043 * can't show that it wasn't. We will make it
2044 * look like selection timeout since that is
2045 * the most common reason for this to occur,
2046 * and it's severe enough.
2047 */
2048
2049 cmd->result = DID_NO_CONNECT << 16;
2050 }
2051 break;
2052
2053 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2054 break;
2055 case CMD_DATA_OVERRUN:
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002056 dev_warn(&h->pdev->dev,
2057 "CDB %16phN data overrun\n", cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002058 break;
2059 case CMD_INVALID: {
2060 /* print_bytes(cp, sizeof(*cp), 1, 0);
2061 print_cmd(cp); */
2062 /* We get CMD_INVALID if you address a non-existent device
2063 * instead of a selection timeout (no response). You will
2064 * see this if you yank out a drive, then try to access it.
2065 * This is kind of a shame because it means that any other
2066 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2067 * missing target. */
2068 cmd->result = DID_NO_CONNECT << 16;
2069 }
2070 break;
2071 case CMD_PROTOCOL_ERR:
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05002072 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002073 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2074 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002075 break;
2076 case CMD_HARDWARE_ERR:
2077 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002078 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2079 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002080 break;
2081 case CMD_CONNECTION_LOST:
2082 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002083 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2084 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002085 break;
2086 case CMD_ABORTED:
2087 cmd->result = DID_ABORT << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002088 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2089 cp->Request.CDB, ei->ScsiStatus);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002090 break;
2091 case CMD_ABORT_FAILED:
2092 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002093 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2094 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002095 break;
2096 case CMD_UNSOLICITED_ABORT:
Stephen M. Cameronf6e76052011-07-26 11:08:52 -05002097 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002098 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2099 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002100 break;
2101 case CMD_TIMEOUT:
2102 cmd->result = DID_TIME_OUT << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002103 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2104 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002105 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002106 case CMD_UNABORTABLE:
2107 cmd->result = DID_ERROR << 16;
2108 dev_warn(&h->pdev->dev, "Command unabortable\n");
2109 break;
Stephen Cameron9437ac42015-04-23 09:32:16 -05002110 case CMD_TMF_STATUS:
2111 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2112 cmd->result = DID_ERROR << 16;
2113 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002114 case CMD_IOACCEL_DISABLED:
2115 /* This only handles the direct pass-through case since RAID
2116 * offload is handled above. Just attempt a retry.
2117 */
2118 cmd->result = DID_SOFT_ERROR << 16;
2119 dev_warn(&h->pdev->dev,
2120 "cp %p had HP SSD Smart Path error\n", cp);
2121 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002122 default:
2123 cmd->result = DID_ERROR << 16;
2124 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2125 cp, ei->CommandStatus);
2126 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002127 cmd_free(h, cp);
Tomas Henzl2cc5bfa2013-08-01 15:14:00 +02002128 cmd->scsi_done(cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002129}
2130
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002131static void hpsa_pci_unmap(struct pci_dev *pdev,
2132 struct CommandList *c, int sg_used, int data_direction)
2133{
2134 int i;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002135
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002136 for (i = 0; i < sg_used; i++)
2137 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2138 le32_to_cpu(c->SG[i].Len),
2139 data_direction);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002140}
2141
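/* DMA-map a single contiguous buffer and describe it in the command's
 * one and only SG entry. Returns 0 on success, -1 on mapping failure.
 */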
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002142static int hpsa_map_one(struct pci_dev *pdev,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002143 struct CommandList *cp,
2144 unsigned char *buf,
2145 size_t buflen,
2146 int data_direction)
2147{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002148 u64 addr64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002149
2150 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2151 cp->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002152 cp->Header.SGTotal = cpu_to_le16(0);
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002153 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002154 }
2155
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002156 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
Shuah Khaneceaae12013-02-20 11:24:34 -06002157 if (dma_mapping_error(&pdev->dev, addr64)) {
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002158 /* Prevent subsequent unmap of something never mapped */
Shuah Khaneceaae12013-02-20 11:24:34 -06002159 cp->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002160 cp->Header.SGTotal = cpu_to_le16(0);
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002161 return -1;
Shuah Khaneceaae12013-02-20 11:24:34 -06002162 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002163 cp->SG[0].Addr = cpu_to_le64(addr64);
2164 cp->SG[0].Len = cpu_to_le32(buflen);
2165 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2166 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2167 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002168 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002169}
2170
Webb Scales25163bd2015-04-23 09:32:00 -05002171#define NO_TIMEOUT ((unsigned long) -1)
2172#define DEFAULT_TIMEOUT 30000 /* milliseconds */
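/* Issue a command and wait synchronously (on-stack completion) for it
 * to finish. NO_TIMEOUT waits indefinitely; otherwise -ETIMEDOUT is
 * returned if the command does not complete within timeout_msecs.
 */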
2173static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2174 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002175{
2176 DECLARE_COMPLETION_ONSTACK(wait);
2177
2178 c->waiting = &wait;
Webb Scales25163bd2015-04-23 09:32:00 -05002179 __enqueue_cmd_and_start_io(h, c, reply_queue);
2180 if (timeout_msecs == NO_TIMEOUT) {
2181 /* TODO: get rid of this no-timeout thing */
2182 wait_for_completion_io(&wait);
2183 return IO_OK;
2184 }
2185 if (!wait_for_completion_io_timeout(&wait,
2186 msecs_to_jiffies(timeout_msecs))) {
2187 dev_warn(&h->pdev->dev, "Command timed out.\n");
2188 return -ETIMEDOUT;
2189 }
2190 return IO_OK;
2191}
2192
2193static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2194 int reply_queue, unsigned long timeout_msecs)
2195{
2196 if (unlikely(lockup_detected(h))) {
2197 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2198 return IO_OK;
2199 }
2200 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002201}
2202
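/* Read this CPU's copy of the per-cpu flag; nonzero indicates a
 * detected controller lockup.
 */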
Stephen M. Cameron094963d2014-05-29 10:53:18 -05002203static u32 lockup_detected(struct ctlr_info *h)
2204{
2205 int cpu;
2206 u32 rc, *lockup_detected;
2207
2208 cpu = get_cpu();
2209 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2210 rc = *lockup_detected;
2211 put_cpu();
2212 return rc;
2213}
2214
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002215#define MAX_DRIVER_CMD_RETRIES 25
Webb Scales25163bd2015-04-23 09:32:00 -05002216static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2217 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002218{
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002219 int backoff_time = 10, retry_count = 0;
Webb Scales25163bd2015-04-23 09:32:00 -05002220 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002221
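 /* Retry while the command returns unit attention or busy status, up
 * to MAX_DRIVER_CMD_RETRIES times, sleeping between attempts after the
 * third, starting at 10 ms and doubling up to roughly one second.
 */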
2222 do {
Joe Perches7630abd2011-05-08 23:32:40 -07002223 memset(c->err_info, 0, sizeof(*c->err_info));
Webb Scales25163bd2015-04-23 09:32:00 -05002224 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2225 timeout_msecs);
2226 if (rc)
2227 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002228 retry_count++;
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002229 if (retry_count > 3) {
2230 msleep(backoff_time);
2231 if (backoff_time < 1000)
2232 backoff_time *= 2;
2233 }
Matt Bondurant852af202012-05-01 11:42:35 -05002234 } while ((check_for_unit_attention(h, c) ||
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002235 check_for_busy(h, c)) &&
2236 retry_count <= MAX_DRIVER_CMD_RETRIES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002237 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
Webb Scales25163bd2015-04-23 09:32:00 -05002238 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2239 rc = -EIO;
2240 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002241}
2242
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002243static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2244 struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002245{
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002246 const u8 *cdb = c->Request.CDB;
2247 const u8 *lun = c->Header.LUN.LunAddrBytes;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002248
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002249 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2250 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2251 txt, lun[0], lun[1], lun[2], lun[3],
2252 lun[4], lun[5], lun[6], lun[7],
2253 cdb[0], cdb[1], cdb[2], cdb[3],
2254 cdb[4], cdb[5], cdb[6], cdb[7],
2255 cdb[8], cdb[9], cdb[10], cdb[11],
2256 cdb[12], cdb[13], cdb[14], cdb[15]);
2257}
2258
2259static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2260 struct CommandList *cp)
2261{
2262 const struct ErrorInfo *ei = cp->err_info;
2263 struct device *d = &cp->h->pdev->dev;
Stephen Cameron9437ac42015-04-23 09:32:16 -05002264 u8 sense_key, asc, ascq;
2265 int sense_len;
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002266
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002267 switch (ei->CommandStatus) {
2268 case CMD_TARGET_STATUS:
Stephen Cameron9437ac42015-04-23 09:32:16 -05002269 if (ei->SenseLen > sizeof(ei->SenseInfo))
2270 sense_len = sizeof(ei->SenseInfo);
2271 else
2272 sense_len = ei->SenseLen;
2273 decode_sense_data(ei->SenseInfo, sense_len,
2274 &sense_key, &asc, &ascq);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002275 hpsa_print_cmd(h, "SCSI status", cp);
2276 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
Stephen Cameron9437ac42015-04-23 09:32:16 -05002277 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2278 sense_key, asc, ascq);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002279 else
Stephen Cameron9437ac42015-04-23 09:32:16 -05002280 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002281 if (ei->ScsiStatus == 0)
2282 dev_warn(d, "SCSI status is abnormally zero. "
2283 "(probably indicates selection timeout "
2284 "reported incorrectly due to a known "
2285 "firmware bug, circa July, 2001.)\n");
2286 break;
2287 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002288 break;
2289 case CMD_DATA_OVERRUN:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002290 hpsa_print_cmd(h, "overrun condition", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002291 break;
2292 case CMD_INVALID: {
2293 /* controller unfortunately reports SCSI passthru's
2294 * to non-existent targets as invalid commands.
2295 */
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002296 hpsa_print_cmd(h, "invalid command", cp);
2297 dev_warn(d, "probably means device no longer present\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002298 }
2299 break;
2300 case CMD_PROTOCOL_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002301 hpsa_print_cmd(h, "protocol error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002302 break;
2303 case CMD_HARDWARE_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002304 hpsa_print_cmd(h, "hardware error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002305 break;
2306 case CMD_CONNECTION_LOST:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002307 hpsa_print_cmd(h, "connection lost", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002308 break;
2309 case CMD_ABORTED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002310 hpsa_print_cmd(h, "aborted", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002311 break;
2312 case CMD_ABORT_FAILED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002313 hpsa_print_cmd(h, "abort failed", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002314 break;
2315 case CMD_UNSOLICITED_ABORT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002316 hpsa_print_cmd(h, "unsolicited abort", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002317 break;
2318 case CMD_TIMEOUT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002319 hpsa_print_cmd(h, "timed out", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002320 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002321 case CMD_UNABORTABLE:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002322 hpsa_print_cmd(h, "unabortable", cp);
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002323 break;
Webb Scales25163bd2015-04-23 09:32:00 -05002324 case CMD_CTLR_LOCKUP:
2325 hpsa_print_cmd(h, "controller lockup detected", cp);
2326 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002327 default:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002328 hpsa_print_cmd(h, "unknown status", cp);
2329 dev_warn(d, "Unknown command status %x\n",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002330 ei->CommandStatus);
2331 }
2332}
2333
2334static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002335 u16 page, unsigned char *buf,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002336 unsigned char bufsize)
2337{
2338 int rc = IO_OK;
2339 struct CommandList *c;
2340 struct ErrorInfo *ei;
2341
Stephen Cameron45fcb862015-01-23 16:43:04 -06002342 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002343
Stephen Cameron574f05d2015-01-23 16:43:20 -06002344 if (c == NULL) {
Stephen Cameron45fcb862015-01-23 16:43:04 -06002345 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06002346 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002347 }
2348
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002349 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2350 page, scsi3addr, TYPE_CMD)) {
2351 rc = -1;
2352 goto out;
2353 }
Webb Scales25163bd2015-04-23 09:32:00 -05002354 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2355 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2356 if (rc)
2357 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002358 ei = c->err_info;
2359 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002360 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002361 rc = -1;
2362 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002363out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06002364 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002365 return rc;
2366}
2367
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002368static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2369 unsigned char *scsi3addr, unsigned char page,
2370 struct bmic_controller_parameters *buf, size_t bufsize)
2371{
2372 int rc = IO_OK;
2373 struct CommandList *c;
2374 struct ErrorInfo *ei;
2375
Stephen Cameron45fcb862015-01-23 16:43:04 -06002376 c = cmd_alloc(h);
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002377 if (c == NULL) { /* trouble... */
Stephen Cameron45fcb862015-01-23 16:43:04 -06002378 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002379 return -ENOMEM;
2380 }
2381
2382 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2383 page, scsi3addr, TYPE_CMD)) {
2384 rc = -1;
2385 goto out;
2386 }
Webb Scales25163bd2015-04-23 09:32:00 -05002387 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2388 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2389 if (rc)
2390 goto out;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002391 ei = c->err_info;
2392 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2393 hpsa_scsi_interpret_error(h, c);
2394 rc = -1;
2395 }
2396out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06002397 cmd_free(h, c);
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002398 return rc;
2399}
2400
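/* Send a reset message to a device; reset_type goes in CDB[1]
 * (fill_cmd defaults to LUN reset). No data is transferred.
 */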
Scott Teelbf711ac2014-02-18 13:56:39 -06002401static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
Webb Scales25163bd2015-04-23 09:32:00 -05002402 u8 reset_type, int reply_queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002403{
2404 int rc = IO_OK;
2405 struct CommandList *c;
2406 struct ErrorInfo *ei;
2407
Stephen Cameron45fcb862015-01-23 16:43:04 -06002408 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002409
2410 if (c == NULL) { /* trouble... */
Stephen Cameron45fcb862015-01-23 16:43:04 -06002411 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
Stephen M. Camerone9ea04a2010-02-25 14:03:06 -06002412 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002413 }
2414
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002415 /* fill_cmd can't fail here, no data buffer to map. */
Scott Teelbf711ac2014-02-18 13:56:39 -06002416 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2417 scsi3addr, TYPE_MSG);
2418 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
Webb Scales25163bd2015-04-23 09:32:00 -05002419 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2420 if (rc) {
2421 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2422 goto out;
2423 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002424 /* no unmap needed here because no data xfer. */
2425
2426 ei = c->err_info;
2427 if (ei->CommandStatus != 0) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002428 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002429 rc = -1;
2430 }
Webb Scales25163bd2015-04-23 09:32:00 -05002431out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06002432 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002433 return rc;
2434}
2435
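/* Fetch the RAID level from byte 8 of vendor-specific VPD page 0xC1;
 * anything out of range is reported as RAID_UNKNOWN.
 */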
2436static void hpsa_get_raid_level(struct ctlr_info *h,
2437 unsigned char *scsi3addr, unsigned char *raid_level)
2438{
2439 int rc;
2440 unsigned char *buf;
2441
2442 *raid_level = RAID_UNKNOWN;
2443 buf = kzalloc(64, GFP_KERNEL);
2444 if (!buf)
2445 return;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002446 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002447 if (rc == 0)
2448 *raid_level = buf[8];
2449 if (*raid_level > RAID_UNKNOWN)
2450 *raid_level = RAID_UNKNOWN;
2451 kfree(buf);
2452 return;
2453}
2454
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002455#define HPSA_MAP_DEBUG
2456#ifdef HPSA_MAP_DEBUG
2457static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2458 struct raid_map_data *map_buff)
2459{
2460 struct raid_map_disk_data *dd = &map_buff->data[0];
2461 int map, row, col;
2462 u16 map_cnt, row_cnt, disks_per_row;
2463
2464 if (rc != 0)
2465 return;
2466
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06002467 /* Show details only if debugging has been activated. */
2468 if (h->raid_offload_debug < 2)
2469 return;
2470
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002471 dev_info(&h->pdev->dev, "structure_size = %u\n",
2472 le32_to_cpu(map_buff->structure_size));
2473 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2474 le32_to_cpu(map_buff->volume_blk_size));
2475 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2476 le64_to_cpu(map_buff->volume_blk_cnt));
2477 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2478 map_buff->phys_blk_shift);
2479 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2480 map_buff->parity_rotation_shift);
2481 dev_info(&h->pdev->dev, "strip_size = %u\n",
2482 le16_to_cpu(map_buff->strip_size));
2483 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2484 le64_to_cpu(map_buff->disk_starting_blk));
2485 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2486 le64_to_cpu(map_buff->disk_blk_cnt));
2487 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2488 le16_to_cpu(map_buff->data_disks_per_row));
2489 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2490 le16_to_cpu(map_buff->metadata_disks_per_row));
2491 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2492 le16_to_cpu(map_buff->row_cnt));
2493 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2494 le16_to_cpu(map_buff->layout_map_count));
Don Brace2b08b3e2015-01-23 16:41:09 -06002495 dev_info(&h->pdev->dev, "flags = 0x%x\n",
Scott Teeldd0e19f2014-02-18 13:57:31 -06002496 le16_to_cpu(map_buff->flags));
Don Brace2b08b3e2015-01-23 16:41:09 -06002497 dev_info(&h->pdev->dev, "encryption = %s\n",
2498 le16_to_cpu(map_buff->flags) &
2499 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
Scott Teeldd0e19f2014-02-18 13:57:31 -06002500 dev_info(&h->pdev->dev, "dekindex = %u\n",
2501 le16_to_cpu(map_buff->dekindex));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002502 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2503 for (map = 0; map < map_cnt; map++) {
2504 dev_info(&h->pdev->dev, "Map%u:\n", map);
2505 row_cnt = le16_to_cpu(map_buff->row_cnt);
2506 for (row = 0; row < row_cnt; row++) {
2507 dev_info(&h->pdev->dev, " Row%u:\n", row);
2508 disks_per_row =
2509 le16_to_cpu(map_buff->data_disks_per_row);
2510 for (col = 0; col < disks_per_row; col++, dd++)
2511 dev_info(&h->pdev->dev,
2512 " D%02u: h=0x%04x xor=%u,%u\n",
2513 col, dd->ioaccel_handle,
2514 dd->xor_mult[0], dd->xor_mult[1]);
2515 disks_per_row =
2516 le16_to_cpu(map_buff->metadata_disks_per_row);
2517 for (col = 0; col < disks_per_row; col++, dd++)
2518 dev_info(&h->pdev->dev,
2519 " M%02u: h=0x%04x xor=%u,%u\n",
2520 col, dd->ioaccel_handle,
2521 dd->xor_mult[0], dd->xor_mult[1]);
2522 }
2523 }
2524}
2525#else
2526static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2527 __attribute__((unused)) int rc,
2528 __attribute__((unused)) struct raid_map_data *map_buff)
2529{
2530}
2531#endif
2532
static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}
	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
		rc = -ENOMEM;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}

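/*
 * Illustration only, not driver code: the RAID map fetched above lands in
 * a fixed-size buffer, so the structure_size check guards against firmware
 * reporting a map larger than this_device->raid_map can hold.  If, say,
 * the controller reported a structure_size bigger than sizeof(raid_map),
 * the transfer would have been truncated; the map is then rejected
 * (rc = -1), which in turn keeps ioaccel disabled for the volume via
 * hpsa_get_ioaccel_status().
 */
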
static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
	unsigned char scsi3addr[], u16 bmic_device_index,
	struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}

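/*
 * Worked example (illustrative only, not driver code) of the VPD page-0
 * parsing above: a device supporting pages 0x00, 0x83 and 0xC1 returns a
 * "supported pages" buffer whose byte 3 holds the page count and whose
 * page codes start at byte 4:
 *
 *	buf[3] = 3;				(three page codes follow)
 *	buf[4] = 0x00; buf[5] = 0x83; buf[6] = 0xC1;
 *
 * hpsa_vpd_page_supported(h, addr, 0x83) then matches at i == 2 and
 * returns 1; asking for an unlisted page code returns 0.
 */
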
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
	this_device->offload_to_be_enabled = this_device->offload_enabled;
out:
	kfree(buf);
	return;
}

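/*
 * Example decode of the ioaccel-status byte above (illustrative only):
 * ioaccel_status == 0x03 sets both OFFLOAD_CONFIGURED_BIT and
 * OFFLOAD_ENABLED_BIT, so offload_config = 1 and offload_enabled = 1,
 * provided the RAID map can also be fetched; 0x01 means configured but
 * not (yet) enabled, and a failed hpsa_get_raid_map() forces
 * offload_enabled back to 0.
 */
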
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				rld->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
						HPSA_REPORT_PHYS_EXTENDED);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static int hpsa_volume_offline(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);
	if (!c)
		return 0;
	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return 0;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);
	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY)  {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Find out if a logical device supports aborts by simply trying one.
 * Smart Array may claim not to support aborts on logical drives, but
 * if an MSA2000 is connected, the drives on that will be presented
 * by the Smart Array as logical drives, and aborts may be sent to
 * those devices successfully.  So the simplest way to find out is
 * to simply try an abort and see how the device responds.
 */
static int hpsa_device_supports_aborts(struct ctlr_info *h,
					unsigned char *scsi3addr)
{
	struct CommandList *c;
	struct ErrorInfo *ei;
	int rc = 0;

	u64 tag = (u64) -1; /* bogus tag */

	/* Assume that physical devices support aborts */
	if (!is_logical_dev_addr_mode(scsi3addr))
		return 1;

	c = cmd_alloc(h);
	if (!c)
		return -ENOMEM;
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	/* no unmap needed here because no data xfer. */
	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_INVALID:
		rc = 0;
		break;
	case CMD_UNABORTABLE:
	case CMD_ABORT_FAILED:
		rc = 1;
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	default:
		rc = 0;
		break;
	}
	cmd_free(h, c);
	return rc;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr)) {
		int volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline < 0 || volume_offline > 0xff)
			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
		this_device->volume_offline = volume_offline & 0xff;
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->offload_to_be_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
{
	unsigned long flags;
	int rc, entry;
	/*
	 * See if this device supports aborts.  If we already know
	 * the device, we already know if it supports aborts, otherwise
	 * we have to find out if it supports aborts by trying one.
	 */
	spin_lock_irqsave(&h->devlock, flags);
	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
		entry >= 0 && entry < h->ndevices) {
		dev->supports_aborts = h->dev[entry]->supports_aborts;
		spin_unlock_irqrestore(&h->devlock, flags);
	} else {
		spin_unlock_irqrestore(&h->devlock, flags);
		dev->supports_aborts =
				hpsa_device_supports_aborts(h, scsi3addr);
		if (dev->supports_aborts < 0)
			dev->supports_aborts = 0;
	}
}

static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	"MSA 2040 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box
		 * reports, other smart array, bus 0, target 0, match lunid
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}

/*
 * Get address of physical disk used for an ioaccel2 mode command:
 *	1. Extract ioaccel2 handle from the command.
 *	2. Find a matching ioaccel2 handle from list of physical disks.
 *	3. Return:
 *		1 and set scsi3addr to address of matching physical
 *		0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct io_accel2_cmd *c2 =
			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	unsigned long flags;
	int i;

	spin_lock_irqsave(&h->devlock, flags);
	for (i = 0; i < h->ndevices; i++)
		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
			memcpy(scsi3addr, h->dev[i]->scsi3addr,
				sizeof(h->dev[i]->scsi3addr));
			spin_unlock_irqrestore(&h->devlock, flags);
			return 1;
		}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded.  %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

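/*
 * Worked example of the LUN-count arithmetic above (illustrative only):
 * the controller returns LUNListLength as a big-endian byte count, and
 * each extended physical entry is 24 bytes while each logical entry is
 * 8 bytes.  So a physical report with LUNListLength == 72 yields
 * 72 / 24 = 3 physical LUNs, and a logical report with
 * LUNListLength == 32 yields 32 / 8 = 4 logical LUNs.
 */
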
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

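/*
 * Index layout example for figure_lunaddrbytes() (illustrative only),
 * with raid_ctlr_position == 0, nphysicals == 2 and nlogicals == 3:
 *
 *	i == 0		-> RAID_CTLR_LUNID (the controller itself)
 *	i == 1..2	-> physdev_list->LUN[0..1].lunid
 *	i == 3..5	-> logdev_list->LUN[0..2]
 *
 * When the controller is instead reported last (raid_ctlr_position ==
 * nphysicals + nlogicals), the physical entries start at i == 0.
 */
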
static int hpsa_hba_mode_enabled(struct ctlr_info *h)
{
	int rc;
	int hba_mode_enabled;
	struct bmic_controller_parameters *ctlr_params;
	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
		GFP_KERNEL);

	if (!ctlr_params)
		return -ENOMEM;
	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
		sizeof(struct bmic_controller_parameters));
	if (rc) {
		kfree(ctlr_params);
		return rc;
	}

	hba_mode_enabled =
		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
	kfree(ctlr_params);
	return hba_mode_enabled;
}

/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		u8 *lunaddrbytes,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle =
		(struct ext_report_lun_entry *) lunaddrbytes;

	dev->ioaccel_handle = rle->ioaccel_handle;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
			GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
	atomic_set(&dev->ioaccel_cmds_out, 0);
}

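/*
 * Queue-depth example for the helper above (illustrative only): a drive
 * whose BMIC identify data reports current_queue_depth_limit == 32 gets
 * dev->queue_depth = 32 - DRIVE_CMDS_RESERVED_FOR_FW = 30; if the BMIC
 * command fails, the conservative default DRIVE_QUEUE_DEPTH (7) is used
 * instead.
 */
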
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	int rescan_hba_mode;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	rescan_hba_mode = hpsa_hba_mode_enabled(h);
	if (rescan_hba_mode < 0)
		goto out;

	if (!h->hba_mode_enabled && rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
	else if (h->hba_mode_enabled && !rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode disabled\n");

	h->hba_mode_enabled = rescan_hba_mode;

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* skip masked non-disk devices */
		if (MASKED_DEVICE(lunaddrbytes))
			if (i < nphysicals + (raid_ctlr_position == 0) &&
				NON_DISK_PHYS_DEV(lunaddrbytes))
				continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;

		/* do not expose masked devices */
		if (MASKED_DEVICE(lunaddrbytes) &&
			i < nphysicals + (raid_ctlr_position == 0)) {
			if (h->hba_mode_enabled)
				dev_warn(&h->pdev->dev,
					"Masked physical device detected\n");
			this_device->expose_state = HPSA_DO_NOT_EXPOSE;
		} else {
			this_device->expose_state =
					HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
		}

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (h->hba_mode_enabled) {
				/* never use raid mapper in HBA mode */
				this_device->offload_enabled = 0;
				ncurrent++;
				break;
			} else if (h->acciopath_status) {
				if (i >= nphysicals) {
					ncurrent++;
					break;
				}
			} else {
				if (i < nphysicals)
					break;
				ncurrent++;
				break;
			}
			if (h->transMethod & CFGTBL_Trans_io_accel1 ||
				h->transMethod & CFGTBL_Trans_io_accel2) {
				hpsa_get_ioaccel_drive_info(h, this_device,
							lunaddrbytes, id_phys);
				atomic_set(&this_device->ioaccel_cmds_out, 0);
				ncurrent++;
			}
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (h->hba_mode_enabled)
				ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);
}

static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
	struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}

/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_index, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	curr_sg = cp->SG;
	chained = 0;
	sg_index = 0;
	scsi_for_each_sg(cmd, sg, use_sg, i) {
		if (i == h->max_cmd_sg_entries - 1 &&
			use_sg > h->max_cmd_sg_entries) {
			chained = 1;
			curr_sg = h->cmd_sg_list[cp->cmdindex];
			sg_index = 0;
		}
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}

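/*
 * Chaining example for hpsa_scatter_gather() (illustrative only),
 * assuming h->max_cmd_sg_entries == 32 and a request that maps to
 * use_sg == 40 elements: entries 0..30 are filled in the command's
 * in-line SG array, the loop then switches curr_sg to the per-command
 * chain block for entries 31..39, and hpsa_map_sg_chain_block() turns
 * the last in-line slot into a chain descriptor.  Header.SGList becomes
 * 32 and Header.SGTotal becomes 41 (use_sg + 1 for the chain entry).
 */
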
#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
		/* fall through */
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((u32) cdb[2]) << 8) | cdb[3];
			block_cnt = cdb[4];
		} else {
			BUG_ON(*cdb_len != 12);
			block = (((u32) cdb[2]) << 24) |
				(((u32) cdb[3]) << 16) |
				(((u32) cdb[4]) << 8) |
				cdb[5];
			block_cnt =
				(((u32) cdb[6]) << 24) |
				(((u32) cdb[7]) << 16) |
				(((u32) cdb[8]) << 8) |
				cdb[9];
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}

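/*
 * Worked example of the CDB rewrite above (illustrative only): a READ_6
 * of 8 blocks at LBA 0x1234,
 *
 *	cdb[] = { READ_6, 0x00, 0x12, 0x34, 0x08, 0x00 }, *cdb_len = 6
 *
 * becomes the equivalent READ_10,
 *
 *	cdb[] = { READ_10, 0x00, 0x00, 0x00, 0x12, 0x34,
 *		  0x00, 0x00, 0x08, 0x00 }, *cdb_len = 10
 *
 * A transfer length above 0xffff blocks cannot be encoded in a 10-byte
 * CDB, so such commands are bounced with IO_ACCEL_INELIGIBLE and take
 * the normal RAID path instead.
 */
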
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}

/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is the LBA.
	 * For other block sizes, the tweak is (LBA * block size) / 512.
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}

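/*
 * Tweak arithmetic example for set_encrypt_ioaccel2() (illustrative
 * only): for a volume with volume_blk_size == 4096 and a READ_10 at
 * LBA 100, first_block = 100 * 4096 / 512 = 800, so tweak_lower = 800
 * and tweak_upper = 0.  For 512-byte blocks the tweak is simply the LBA.
 */
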
Scott Teelc3497752014-02-18 13:56:34 -06003776static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3777 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
Don Brace03383732015-01-23 16:43:30 -06003778 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
Scott Teelc3497752014-02-18 13:56:34 -06003779{
3780 struct scsi_cmnd *cmd = c->scsi_cmd;
3781 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3782 struct ioaccel2_sg_element *curr_sg;
3783 int use_sg, i;
3784 struct scatterlist *sg;
3785 u64 addr64;
3786 u32 len;
3787 u32 total_len = 0;
3788
Don Brace03383732015-01-23 16:43:30 -06003789 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3790 atomic_dec(&phys_disk->ioaccel_cmds_out);
Scott Teelc3497752014-02-18 13:56:34 -06003791 return IO_ACCEL_INELIGIBLE;
Don Brace03383732015-01-23 16:43:30 -06003792 }
Scott Teelc3497752014-02-18 13:56:34 -06003793
Don Brace03383732015-01-23 16:43:30 -06003794 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3795 atomic_dec(&phys_disk->ioaccel_cmds_out);
Scott Teelc3497752014-02-18 13:56:34 -06003796 return IO_ACCEL_INELIGIBLE;
Don Brace03383732015-01-23 16:43:30 -06003797 }
3798
Scott Teelc3497752014-02-18 13:56:34 -06003799 c->cmd_type = CMD_IOACCEL2;
3800 /* Adjust the DMA address to point to the accelerated command buffer */
3801 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3802 (c->cmdindex * sizeof(*cp));
3803 BUG_ON(c->busaddr & 0x0000007F);
3804
3805 memset(cp, 0, sizeof(*cp));
3806 cp->IU_type = IOACCEL2_IU_TYPE;
3807
3808 use_sg = scsi_dma_map(cmd);
Don Brace03383732015-01-23 16:43:30 -06003809 if (use_sg < 0) {
3810 atomic_dec(&phys_disk->ioaccel_cmds_out);
Scott Teelc3497752014-02-18 13:56:34 -06003811 return use_sg;
Don Brace03383732015-01-23 16:43:30 -06003812 }

	if (use_sg) {
		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
		curr_sg = cp->sg;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	/* fill in sg elements */
	cp->sg_count = (u8) use_sg;

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}

static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}
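
/*
 * Illustrative walk-through (assumed values, not taken from any real
 * raid map): with data_disks_per_row = 2, layout_map_count = 3,
 * *map_index = 1 and offload_to_mirror = 2, the loop advances
 * *map_index 1 -> 3 -> 5, i.e. from mirror group 0 to the matching
 * member of mirror group 2.
 */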

/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
		/* fall through */
	case READ_6:
		first_block =
			(((u64) cmd->cmnd[2]) << 8) |
			cmd->cmnd[3];
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
		/* fall through */
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
		/* fall through */
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
		/* fall through */
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif
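
	/*
	 * Worked example (assumed geometry): with data_disks_per_row = 3
	 * and strip_size = 128, blocks_per_row = 384; first_block = 500
	 * then gives first_row = 500/384 = 1, first_row_offset =
	 * 500 - 384 = 116 and first_column = 116/128 = 0.
	 */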

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs.
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors (R1-ADM)
		 * and R10 with # of drives divisible by 3.
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of map->layout_map_count - 1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		BUG_ON(r5or6_blocks_per_row == 0);
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;
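
		/*
		 * Illustrative check (assumed geometry): with
		 * r5or6_blocks_per_row = 384 and layout_map_count = 2,
		 * stripesize = 768; first_block = 500 falls in group
		 * 500 % 768 / 384 = 1, so a request that also touches
		 * block 768 (group 0) spans groups and is rejected above.
		 */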

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
			first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
				r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
				r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
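	/*
	 * For example (assumed shift value): phys_blk_shift = 3 would
	 * convert 4096-byte logical blocks to 512-byte physical blocks,
	 * multiplying both the block number and the count by 8.
	 */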
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}

/*
 * Submit commands down the "normal" RAID stack path.
 * All callers of hpsa_ciss_submit must check lockup_detected
 * beforehand: optionally before, and always after, calling cmd_alloc.
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly. Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		cmd_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c =
			container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return;
	}
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}

/* Running in struct Scsi_Host->host_lock less mode */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	c = cmd_alloc(h);
	if (c == NULL) { /* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return 0;
	}

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS &&
		h->acciopath_status)) {

		cmd->host_scribble = (unsigned char *) c;
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;

		if (dev->offload_enabled) {
			rc = hpsa_scsi_ioaccel_raid_map(h, c);
			if (rc == 0)
				return 0; /* Sent on ioaccel path */
			if (rc < 0) { /* scsi_dma_map failed. */
				cmd_free(h, c);
				return SCSI_MLQUEUE_HOST_BUSY;
			}
		} else if (dev->ioaccel_handle) {
			rc = hpsa_scsi_ioaccel_direct_map(h, c);
			if (rc == 0)
				return 0; /* Sent on direct map path */
			if (rc < 0) { /* scsi_dma_map failed. */
				cmd_free(h, c);
				return SCSI_MLQUEUE_HOST_BUSY;
			}
		}
	}
	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
}

static void hpsa_scan_complete(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1;
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up. If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	hpsa_scan_complete(h);
}

static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;
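
	/*
	 * E.g. a requested depth of 0 is raised to 1, and a request above
	 * the drive's advertised queue_depth is clamped down to it.
	 */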

	return scsi_change_queue_depth(sdev, qdepth);
}

static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}

static void hpsa_unregister_scsi(struct ctlr_info *h)
{
	/* we are being forcibly unloaded, and may not refuse. */
	scsi_remove_host(h->scsi_host);
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
}

static int hpsa_register_scsi(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

 fail_host_put:
	dev_err(&h->pdev->dev, "%s: scsi_add_host failed for controller %d\n",
		__func__, h->ctlr);
	scsi_host_put(sh);
	return error;
 fail:
	dev_err(&h->pdev->dev, "%s: scsi_host_alloc failed for controller %d\n",
		__func__, h->ctlr);
	return -ENOMEM;
}

static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;

	c = cmd_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev,
			"out of memory in wait_for_device_to_become_ready\n");
		return IO_ERROR;
	}

	/* Send test unit ready until device ready, or give up. */
	while (count < HPSA_TUR_RETRY_LIMIT) {

		/* Wait for a bit. do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);
		count++;
		rc = 0; /* Device ready. */

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime = waittime * 2;
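
		/*
		 * The sleeps thus grow 1, 2, 4, 8, ... seconds per retry,
		 * capped once waittime reaches HPSA_MAX_WAIT_INTERVAL_SECS.
		 */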

		/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
		(void) fill_cmd(c, TEST_UNIT_READY, h,
				NULL, 0, 0, lunaddr, TYPE_CMD);
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						NO_TIMEOUT);
		if (rc)
			goto do_it_again;
		/* no unmap needed here because no data xfer. */

		if (c->err_info->CommandStatus == CMD_SUCCESS)
			break;

		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
			break;
do_it_again:
		dev_warn(&h->pdev->dev,
			"waiting %d secs for device to become ready.\n",
			waittime);
		rc = 1; /* device not ready. */
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}

/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining. Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;

	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev,
			"hpsa_eh_device_reset_handler: device lookup failed\n");
		return FAILED;
	}

	/* if controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		dev_warn(&h->pdev->dev,
			"scsi %d:%d:%d:%d RESET FAILED, lockup detected\n",
			h->scsi_host->host_no, dev->bus, dev->target,
			dev->lun);
		return FAILED;
	}

	/* this reset request might be the result of a lockup; check */
	if (detect_controller_lockup(h)) {
		dev_warn(&h->pdev->dev,
			"scsi %d:%d:%d:%d RESET FAILED, new lockup detected\n",
			h->scsi_host->host_no, dev->bus, dev->target,
			dev->lun);
		return FAILED;
	}

	hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");

	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
			     DEFAULT_REPLY_QUEUE);
	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
		return SUCCESS;

	dev_warn(&h->pdev->dev,
		"scsi %d:%d:%d:%d reset failed\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
	return FAILED;
}

static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
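
/*
 * In effect each 4-byte half of the tag is byte-reversed: the bytes
 * 01 02 03 04 05 06 07 08 come back as 04 03 02 01 08 07 06 05
 * (an endianness swap of two 32-bit words).
 */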

static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
	u64 tag;

	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		tag = le64_to_cpu(cm1->tag);
		*tagupper = cpu_to_le32(tag >> 32);
		*taglower = cpu_to_le32(tag);
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		/* upper tag not used in ioaccel2 mode */
		memset(tagupper, 0, sizeof(*tagupper));
		*taglower = cm2->Tag;
		return;
	}
	tag = le64_to_cpu(c->Header.tag);
	*tagupper = cpu_to_le32(tag >> 32);
	*taglower = cpu_to_le32(tag);
}
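
/*
 * For instance, a 64-bit tag of 0x1122334455667788 splits into
 * tagupper = 0x11223344 and taglower = 0x55667788.
 */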

static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	__le32 tagupper, taglower;

	c = cmd_alloc(h);
	if (c == NULL) { /* trouble... */
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
		0, 0, scsi3addr, TYPE_MSG);
	if (h->needs_abort_tags_swizzled)
		swizzle_abort_tag(&c->Request.CDB[4]);
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
		__func__, tagupper, taglower);
	return rc;
}

/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK)
 *	  -1 on failure
 */
static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
	unsigned char *psa = &phys_scsi3addr[0];

	/* Get a pointer to the hpsa logical device. */
	scmd = abort->scsi_cmd;
	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
	if (dev == NULL) {
		dev_warn(&h->pdev->dev,
			"Cannot abort: no device pointer for command.\n");
		return -1; /* not abortable */
	}

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			"Reset as abort",
			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);

	if (!dev->offload_enabled) {
		dev_warn(&h->pdev->dev,
			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
		return -1; /* not abortable */
	}

	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
		return -1; /* not abortable */
	}

	/* send the reset */
	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return rc; /* failed to reset */
	}

	/* wait for device to recover */
	if (wait_for_device_to_become_ready(h, psa) != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return -1; /* failed to recover */
	}

	/* device recovered */
	dev_info(&h->pdev->dev,
		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		psa[0], psa[1], psa[2], psa[3],
		psa[4], psa[5], psa[6], psa[7]);

	return rc; /* success */
}

static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	/* I/O accelerator mode 2 commands should be aborted via the
	 * accelerated path, since the RAID path is unaware of these
	 * commands, but the underlying firmware can't handle an abort
	 * TMF. Change the abort to a physical device reset.
	 */
	if (abort->cmd_type == CMD_IOACCEL2)
		return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
							abort, reply_queue);
	return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
}

/* Find out which reply queue a command was meant to return on */
static int hpsa_extract_reply_queue(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->cmd_type == CMD_IOACCEL2)
		return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
	return c->Header.ReplyQueue;
}

/*
 * Limit concurrency of abort commands to prevent
 * over-subscription of commands.
 * Returns nonzero if no abort slot became available within
 * ABORT_CMD_WAIT_MSECS, zero once a slot has been reserved.
 */
static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
{
#define ABORT_CMD_WAIT_MSECS 5000
	return !wait_event_timeout(h->abort_cmd_wait_queue,
			atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
			msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
}

/* Send an abort for the specified command.
 * If the device and controller support it,
 * send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int i, rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	__le32 tagupper, taglower;
	int refcount, reply_queue;

	if (sc == NULL)
		return FAILED;

	if (sc->device == NULL)
		return FAILED;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (h == NULL)
		return FAILED;

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, device lookup failed.\n",
			__func__);
		return FAILED;
	}

	/* If controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
			"ABORT FAILED, lockup detected");
		return FAILED;
	}

	/* This is a good time to check if controller lockup has occurred */
	if (detect_controller_lockup(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
			"ABORT FAILED, new lockup detected");
		return FAILED;
	}

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun,
		"Aborting command");

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		/* This can happen if the command already completed. */
		return SUCCESS;
	}
	refcount = atomic_inc_return(&abort->refcount);
	if (refcount == 1) { /* Command is done already. */
		cmd_free(h, abort);
		return SUCCESS;
	}

	/* Don't bother trying the abort if we know it won't work. */
	if (abort->cmd_type != CMD_IOACCEL2 &&
		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
		cmd_free(h, abort);
		return FAILED;
	}

	hpsa_get_tag(h, abort, &taglower, &tagupper);
	reply_queue = hpsa_extract_reply_queue(h, abort);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
			as->cmnd[0], as->serial_number);
	dev_dbg(&h->pdev->dev, "%s\n", msg);
	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which. Send the abort down.
	 */
	if (wait_for_available_abort_cmd(h)) {
		dev_warn(&h->pdev->dev,
			"Timed out waiting for an abort command to become available.\n");
		cmd_free(h, abort);
		return FAILED;
	}
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
	atomic_inc(&h->abort_cmds_available);
	wake_up_all(&h->abort_cmd_wait_queue);
	if (rc != 0) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
			"FAILED to abort command");
		cmd_free(h, abort);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);

	/* If the abort(s) above completed and actually aborted the
	 * command, then the command to be aborted should already be
	 * completed. If not, wait around a bit more to see if they
	 * manage to complete normally.
	 */
#define ABORT_COMPLETE_WAIT_SECS 30
	for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
		refcount = atomic_read(&abort->refcount);
		if (refcount < 2) {
			cmd_free(h, abort);
			return SUCCESS;
		} else {
			msleep(100);
		}
	}
	dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
		msg, ABORT_COMPLETE_WAIT_SECS);
	cmd_free(h, abort);
	return FAILED;
}

/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use; the refcount scheme below makes this safe
 * without holding a lock. cmd_free() is the complement.
 */

static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int i;
	dma_addr_t cmd_dma_handle, err_dma_handle;
	int refcount;
	unsigned long offset;

	/*
	 * There is some *extremely* small but non-zero chance that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads. In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 */

	offset = h->last_allocation; /* benignly racy */
	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
		if (unlikely(i == h->nr_cmds)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % h->nr_cmds;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
	}
	h->last_allocation = i; /* benignly racy */

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(*c->err_info);

	c->cmdindex = i;

	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));

	c->h = h;
	return c;
}
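
/*
 * The tag stored in cmd_alloc() is just the pool index shifted left by
 * DIRECT_LOOKUP_SHIFT; the low bits are presumably reserved for the
 * controller, letting completions map straight back to a pool slot.
 */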

static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}

#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
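
/*
 * The pattern above is the usual compat shim: copy the 32-bit ioctl
 * struct field by field into a 64-bit struct placed in user space via
 * compat_alloc_user_space(), call the native handler, then copy any
 * error info back into the caller's 32-bit struct.
 */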

static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06005143
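/*
 * Compat entry point.  Commands whose structures lay out identically
 * for 32-bit and 64-bit callers are forwarded unchanged; only the two
 * passthru commands need translation.
 */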
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}

static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev,
			"driver version string '%s' unrecognized.\n",
			HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
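	/* Pack one byte per component, e.g. version 3.4.4 -> 0x030404 */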
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}

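/*
 * Single-buffer passthru: copy the caller's command (and, for writes,
 * its data) into the kernel, map the buffer as one SG element, run the
 * command synchronously, then copy error info and any read data back.
 */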
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
					iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);
	if (c == NULL) {
		rc = -ENOMEM;
		goto out_kfree;
	}
	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}

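/*
 * Large passthru: the caller's buffer is carved into up to
 * SG_ENTRIES_IN_CMD kmalloc'ed chunks of at most ioc->malloc_size
 * bytes, each mapped as its own SG element, so a transfer can exceed
 * what a single kmalloc allocation could cover.
 */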
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);
	if (c == NULL) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;

		for (i = 0; i < sg_used; i++) {
			temp64 = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
							(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (status) {
		status = -EIO;
		goto cleanup0;
	}

	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		int i;

		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		int i;

		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}

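/*
 * Give unit attention handling a look at ioctl completions that carry
 * target status (other than a check condition), so controller events
 * are not missed.
 */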
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}

/*
 * ioctl
 */
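/*
 * The two passthru paths are throttled via h->passthru_cmds_avail so
 * that user space cannot tie up every command slot with raw commands.
 */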
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}

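/*
 * Send a reset message addressed to the controller itself
 * (RAID_CTLR_LUNID).  The command is fire-and-forget: once the reset
 * takes effect the controller will never post a completion for it.
 */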
static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);
	if (!c)
		return -ENOMEM;
	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return 0;
}

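/*
 * Build a CISS command (TYPE_CMD) or message (TYPE_MSG) in *c.  The CDB
 * contents, attributes and transfer direction are derived from the
 * opcode; any data buffer is mapped as a single SG element at the end.
 */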
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	u64 tag; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code & VPD_PAGE) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = (page_code & 0xff);
			}
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to the controller, so it's a physical
			 * command: mode = 00, target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_CONTROLLER_PARAMETERS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
			BUG();
			return -1;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_ABORT_MSG:
			memcpy(&tag, buff, sizeof(tag));
			dev_dbg(&h->pdev->dev,
				"Abort Tag:0x%016llx using rqst Tag:0x%016llx\n",
				tag, c->Header.tag);
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (GET_DIR(c->Request.type_attr_dir)) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}

/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}

static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c)
{
	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
}

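/*
 * Completed tags carry status in their low-order bits: two bits in
 * simple mode, DIRECT_LOOKUP_SHIFT bits in performant mode.  Mask them
 * off to recover the command index.
 */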
static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return tag & ~HPSA_SIMPLE_ERROR_BITS;
	return tag & ~HPSA_PERF_ERROR_BITS;
}

/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}

/* Some controllers, like the P400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled (known firmware bug.)  Ignoring.\n");

	return 1;
}

/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}

static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			process_indexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		process_indexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

/* Send a message CDB to the firmware.  Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
	unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
		sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	__le32 paddr32;
	u32 tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return err;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = cpu_to_le32(paddr64);

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr =
			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));

	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)

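/*
 * Reset the controller either by writing a reset code to the doorbell
 * register or, for boards that predate the doorbell method, by
 * bouncing the PCI power state through D3hot back to D0.
 */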
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void __iomem *vaddr, u32 use_doorbell)
{

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);

		/* PMC hardware guys tell us we need a 10 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
		msleep(10000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller." */

		int rc = 0;

		dev_info(&pdev->dev, "using PCI PM to reset controller\n");

		/* enter the D3hot power management state */
		rc = pci_set_power_state(pdev, PCI_D3hot);
		if (rc)
			return rc;

		msleep(500);

		/* enter the D0 power management state */
		rc = pci_set_power_state(pdev, PCI_D0);
		if (rc)
			return rc;

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This is for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}

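/*
 * Reset detection handshake: the driver writes its version string into
 * the config table before resetting and compares afterwards.  If the
 * string survived the reset, the reset did not actually happen.
 */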
static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
	unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{

	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}

/* This does a hard reset of the controller using PCI power management
 * states or using the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	if (!ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Controller not resettable\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev,
				"Soft reset not supported. Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"Failed waiting for board to become ready after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(vaddr);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev,
			"Unable to successfully reset controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}

/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
		readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
		readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
		readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
		readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
		readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
		readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}

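/*
 * Translate a config-space BAR offset (e.g. PCI_BASE_ADDRESS_2) into a
 * PCI resource index, allowing for 64-bit memory BARs consuming two
 * 4-byte register slots.
 */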
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
					"base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable.  If not, we use legacy INTx mode.
 */
static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err, i;
	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];

	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
		hpsa_msix_entries[i].vector = 0;
		hpsa_msix_entries[i].entry = i;
	}

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSI-X capable controller\n");
		h->msix_vector = MAX_REPLY_QUEUES;
		if (h->msix_vector > num_online_cpus())
			h->msix_vector = num_online_cpus();
		err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
					    1, h->msix_vector);
		if (err < 0) {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
			h->msix_vector = 0;
			goto single_msi_mode;
		} else if (err < h->msix_vector) {
			dev_warn(&h->pdev->dev,
				"only %d MSI-X vectors available\n", err);
		}
		h->msix_vector = err;
		for (i = 0; i < h->msix_vector; i++)
			h->intr[i] = hpsa_msix_entries[i].vector;
		return;
	}
single_msi_mode:
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI capable controller\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}

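/*
 * The board ID is the PCI subsystem device ID in the upper 16 bits and
 * the subsystem vendor ID in the lower 16; look it up in products[] to
 * pick the matching access-method entry.
 */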
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev,
			"unrecognized board ID: 0x%08x, ignoring.\n",
			*board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}

static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006360static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6361 int wait_for_ready)
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006362{
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006363 int i, iterations;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006364 u32 scratchpad;
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006365 if (wait_for_ready)
6366 iterations = HPSA_BOARD_READY_ITERATIONS;
6367 else
6368 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006369
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006370 for (i = 0; i < iterations; i++) {
6371 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6372 if (wait_for_ready) {
6373 if (scratchpad == HPSA_FIRMWARE_READY)
6374 return 0;
6375 } else {
6376 if (scratchpad != HPSA_FIRMWARE_READY)
6377 return 0;
6378 }
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006379 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6380 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006381 dev_warn(&pdev->dev, "board not ready, timed out.\n");
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006382 return -ENODEV;
6383}
6384
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006385static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6386 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6387 u64 *cfg_offset)
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006388{
6389 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6390 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6391 *cfg_base_addr &= (u32) 0x0000ffff;
6392 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6393 if (*cfg_base_addr_index == -1) {
6394 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6395 return -ENODEV;
6396 }
6397 return 0;
6398}
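/*
 * Sketch of how the values found above are combined (see
 * hpsa_find_cfgtables() below for the real use): the config table
 * lives at
 *
 *	pci_resource_start(pdev, cfg_base_addr_index) + cfg_offset
 *
 * i.e. at an offset within whichever BAR the low 16 bits of the
 * SA5_CTCFG_OFFSET register select.
 */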
6399
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006400static int hpsa_find_cfgtables(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006401{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06006402 u64 cfg_offset;
6403 u32 cfg_base_addr;
6404 u64 cfg_base_addr_index;
Don Brace303932f2010-02-04 08:42:40 -06006405 u32 trans_offset;
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006406 int rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006407
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006408 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6409 &cfg_base_addr_index, &cfg_offset);
6410 if (rc)
6411 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006412 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006413 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
Robert Elliottcd3c81c2015-01-23 16:42:27 -06006414 if (!h->cfgtable) {
6415 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006416 return -ENOMEM;
Robert Elliottcd3c81c2015-01-23 16:42:27 -06006417 }
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006418 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6419 if (rc)
6420 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006421 /* Find performant mode table. */
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006422 trans_offset = readl(&h->cfgtable->TransMethodOffset);
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006423 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6424 cfg_base_addr_index)+cfg_offset+trans_offset,
6425 sizeof(*h->transtable));
6426 if (!h->transtable)
6427 return -ENOMEM;
6428 return 0;
6429}
6430
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006431static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006432{
Stephen Cameron41ce4c32015-04-23 09:31:47 -05006433#define MIN_MAX_COMMANDS 16
6434 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
6435
6436 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
Stephen M. Cameron72ceeae2011-01-06 14:48:13 -06006437
6438 /* Limit commands in memory limited kdump scenario. */
6439 if (reset_devices && h->max_commands > 32)
6440 h->max_commands = 32;
6441
Stephen Cameron41ce4c32015-04-23 09:31:47 -05006442 if (h->max_commands < MIN_MAX_COMMANDS) {
6443 dev_warn(&h->pdev->dev,
6444 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
6445 h->max_commands,
6446 MIN_MAX_COMMANDS);
6447 h->max_commands = MIN_MAX_COMMANDS;
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006448 }
6449}
6450
Webb Scalesc7ee65b2015-01-23 16:42:17 -06006451/* If the controller reports that the total max sg entries is greater than 512,
6452 * then we know that chained SG blocks work. (Original smart arrays did not
6453 * support chained SG blocks and would return zero for max sg entries.)
6454 */
6455static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6456{
6457 return h->maxsgentries > 512;
6458}
6459
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006460/* Interrogate the hardware for some limits:
6461 * max commands, max SG elements without chaining, and with chaining,
6462 * SG chain block size, etc.
6463 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006464static void hpsa_find_board_params(struct ctlr_info *h)
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006465{
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006466 hpsa_get_max_perf_mode_cmds(h);
Stephen Cameron45fcb862015-01-23 16:43:04 -06006467 h->nr_cmds = h->max_commands;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006468 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006469 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
Webb Scalesc7ee65b2015-01-23 16:42:17 -06006470 if (hpsa_supports_chained_sg_blocks(h)) {
6471 /* Limit in-command s/g elements to 32 to save dma'able memory. */
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006472 h->max_cmd_sg_entries = 32;
Webb Scales1a63ea62014-11-14 17:26:43 -06006473 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006474 h->maxsgentries--; /* save one for chain pointer */
6475 } else {
Webb Scalesc7ee65b2015-01-23 16:42:17 -06006476 /*
6477 * Original smart arrays supported at most 31 s/g entries
6478 * embedded inline in the command (trying to use more
6479 * would lock up the controller)
6480 */
6481 h->max_cmd_sg_entries = 31;
Webb Scales1a63ea62014-11-14 17:26:43 -06006482 h->maxsgentries = 31; /* default to traditional values */
Webb Scalesc7ee65b2015-01-23 16:42:17 -06006483 h->chainsize = 0;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006484 }
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006485
6486 /* Find out what task management functions are supported and cache */
6487 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
Scott Teel0e7a7fc2014-02-18 13:55:59 -06006488 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6489 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6490 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6491 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006492}
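/*
 * Worked example of the scatter-gather limits above, assuming a
 * controller that reports maxsgentries = 2048:
 *
 *	max_cmd_sg_entries = 32			(embedded in the command)
 *	chainsize = 2048 - 32 = 2016		(entries per chain block)
 *	maxsgentries = 2048 - 1 = 2047		(one slot reserved for
 *						 the chain pointer)
 *
 * A controller reporting 512 or fewer entries takes the legacy
 * 31-entry, no-chaining path instead.
 */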
6493
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05006494static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6495{
Akinobu Mita0fc9fd42012-04-04 22:14:59 +09006496 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
Stephen Cameron050f7142015-01-23 16:42:22 -06006497 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05006498 return false;
6499 }
6500 return true;
6501}
6502
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006503static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006504{
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006505 u32 driver_support;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006506
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006507 driver_support = readl(&(h->cfgtable->driver_support));
Arnd Bergmann0b9e7b72014-06-26 15:44:52 +02006508 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6509#ifdef CONFIG_X86
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006510 driver_support |= ENABLE_SCSI_PREFETCH;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006511#endif
Stephen M. Cameron28e13442013-12-04 17:10:21 -06006512 driver_support |= ENABLE_UNIT_ATTN;
6513 writel(driver_support, &(h->cfgtable->driver_support));
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006514}
6515
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05006516/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
6517 * in a prefetch beyond physical memory.
6518 */
6519static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6520{
6521 u32 dma_prefetch;
6522
6523 if (h->board_id != 0x3225103C)
6524 return;
6525 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6526 dma_prefetch |= 0x8000;
6527 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6528}
6529
Robert Elliottc706a792015-01-23 16:45:01 -06006530static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006531{
6532 int i;
6533 u32 doorbell_value;
6534 unsigned long flags;
6535 /* wait until the clear_event_notify bit 6 is cleared by the controller. */
Robert Elliott007e7aa2015-01-23 16:44:56 -06006536 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006537 spin_lock_irqsave(&h->lock, flags);
6538 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6539 spin_unlock_irqrestore(&h->lock, flags);
6540 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
Robert Elliottc706a792015-01-23 16:45:01 -06006541 goto done;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006542 /* delay and try again */
Robert Elliott007e7aa2015-01-23 16:44:56 -06006543 msleep(CLEAR_EVENT_WAIT_INTERVAL);
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006544 }
Robert Elliottc706a792015-01-23 16:45:01 -06006545 return -ENODEV;
6546done:
6547 return 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006548}
6549
Robert Elliottc706a792015-01-23 16:45:01 -06006550static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006551{
6552 int i;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006553 u32 doorbell_value;
6554 unsigned long flags;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006555
6556 /* under certain very rare conditions, this can take a while.
6557 * (e.g.: hot-replacing a failed 144GB drive in a RAID 5 set right
6558 * as we enter this code.)
6559 */
Robert Elliott007e7aa2015-01-23 16:44:56 -06006560 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
Webb Scales25163bd2015-04-23 09:32:00 -05006561 if (h->remove_in_progress)
6562 goto done;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006563 spin_lock_irqsave(&h->lock, flags);
6564 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6565 spin_unlock_irqrestore(&h->lock, flags);
Dan Carpenter382be662011-02-15 15:33:13 -06006566 if (!(doorbell_value & CFGTBL_ChangeReq))
Robert Elliottc706a792015-01-23 16:45:01 -06006567 goto done;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006568 /* delay and try again */
Robert Elliott007e7aa2015-01-23 16:44:56 -06006569 msleep(MODE_CHANGE_WAIT_INTERVAL);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006570 }
Robert Elliottc706a792015-01-23 16:45:01 -06006571 return -ENODEV;
6572done:
6573 return 0;
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006574}
6575
Robert Elliottc706a792015-01-23 16:45:01 -06006576/* return -ENODEV or other reason on error, 0 on success */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006577static int hpsa_enter_simple_mode(struct ctlr_info *h)
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006578{
6579 u32 trans_support;
6580
6581 trans_support = readl(&(h->cfgtable->TransportSupport));
6582 if (!(trans_support & SIMPLE_MODE))
6583 return -ENOTSUPP;
6584
6585 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006586
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006587 /* Update the field, and then ring the doorbell */
6588 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06006589 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006590 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
Robert Elliottc706a792015-01-23 16:45:01 -06006591 if (hpsa_wait_for_mode_change_ack(h))
6592 goto error;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006593 print_cfg_table(&h->pdev->dev, h->cfgtable);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006594 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6595 goto error;
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06006596 h->transMethod = CFGTBL_Trans_Simple;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006597 return 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006598error:
Stephen Cameron050f7142015-01-23 16:42:22 -06006599 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006600 return -ENODEV;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006601}
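/*
 * The doorbell handshake used above is the controller's generic
 * configuration-change protocol; a minimal sketch, using the register
 * names from this file:
 *
 *	writel(CFGTBL_Trans_Simple, &cfgtable->HostWrite.TransportRequest);
 *	writel(CFGTBL_ChangeReq, vaddr + SA5_DOORBELL);
 *	... poll SA5_DOORBELL until CFGTBL_ChangeReq clears ...
 *	... then verify TransportActive reflects the requested mode ...
 */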
6602
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006603static int hpsa_pci_init(struct ctlr_info *h)
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006604{
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006605 int prod_index, err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006606
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006607 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6608 if (prod_index < 0)
Robert Elliott60f923b2015-01-23 16:42:06 -06006609 return prod_index;
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006610 h->product_name = products[prod_index].product_name;
6611 h->access = *(products[prod_index].access);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006612
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05006613 h->needs_abort_tags_swizzled =
6614 ctlr_needs_abort_tags_swizzled(h->board_id);
6615
Matthew Garrette5a44df2011-11-11 11:14:23 -05006616 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6617 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6618
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006619 err = pci_enable_device(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006620 if (err) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006621 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006622 return err;
6623 }
6624
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06006625 err = pci_request_regions(h->pdev, HPSA);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006626 if (err) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006627 dev_err(&h->pdev->dev,
6628 "cannot obtain PCI resources, aborting\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006629 return err;
6630 }
Robert Elliott4fa604e2014-11-14 17:27:24 -06006631
6632 pci_set_master(h->pdev);
6633
Stephen M. Cameron6b3f4c52010-05-27 15:13:02 -05006634 hpsa_interrupt_mode(h);
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006635 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006636 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006637 goto err_out_free_res;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006638 h->vaddr = remap_pci_mem(h->paddr, 0x250);
Stephen M. Cameron204892e2010-05-27 15:13:22 -05006639 if (!h->vaddr) {
6640 err = -ENOMEM;
6641 goto err_out_free_res;
6642 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006643 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006644 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006645 goto err_out_free_res;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006646 err = hpsa_find_cfgtables(h);
6647 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006648 goto err_out_free_res;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006649 hpsa_find_board_params(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006650
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05006651 if (!hpsa_CISS_signature_present(h)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006652 err = -ENODEV;
6653 goto err_out_free_res;
6654 }
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006655 hpsa_set_driver_support_bits(h);
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05006656 hpsa_p600_dma_prefetch_quirk(h);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006657 err = hpsa_enter_simple_mode(h);
6658 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006659 goto err_out_free_res;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006660 return 0;
6661
6662err_out_free_res:
Stephen M. Cameron204892e2010-05-27 15:13:22 -05006663 if (h->transtable)
6664 iounmap(h->transtable);
6665 if (h->cfgtable)
6666 iounmap(h->cfgtable);
6667 if (h->vaddr)
6668 iounmap(h->vaddr);
Stephen M. Cameronf0bd0b682012-05-01 11:42:09 -05006669 pci_disable_device(h->pdev);
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006670 pci_release_regions(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006671 return err;
6672}
6673
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006674static void hpsa_hba_inquiry(struct ctlr_info *h)
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06006675{
6676 int rc;
6677
6678#define HBA_INQUIRY_BYTE_COUNT 64
6679 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6680 if (!h->hba_inquiry_data)
6681 return;
6682 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6683 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6684 if (rc != 0) {
6685 kfree(h->hba_inquiry_data);
6686 h->hba_inquiry_data = NULL;
6687 }
6688}
6689
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02006690static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006691{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006692 int rc, i;
Tomas Henzl3b747292015-01-23 16:41:20 -06006693 void __iomem *vaddr;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006694
6695 if (!reset_devices)
6696 return 0;
6697
Tomas Henzl132aa222014-08-14 16:12:39 +02006698 /* The kdump kernel is loading, so we don't know what state the
6699 * PCI interface is in. The dev->enable_cnt is equal to zero,
6700 * so we call enable+disable, wait a while, and switch it on.
6701 */
6702 rc = pci_enable_device(pdev);
6703 if (rc) {
6704 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6705 return -ENODEV;
6706 }
6707 pci_disable_device(pdev);
6708 msleep(260); /* a randomly chosen number */
6709 rc = pci_enable_device(pdev);
6710 if (rc) {
6711 dev_warn(&pdev->dev, "failed to enable device.\n");
6712 return -ENODEV;
6713 }
Robert Elliott4fa604e2014-11-14 17:27:24 -06006714
Tomas Henzl859c75a2014-09-12 14:44:15 +02006715 pci_set_master(pdev);
Robert Elliott4fa604e2014-11-14 17:27:24 -06006716
Tomas Henzl3b747292015-01-23 16:41:20 -06006717 vaddr = pci_ioremap_bar(pdev, 0);
6718 if (vaddr == NULL) {
6719 rc = -ENOMEM;
6720 goto out_disable;
6721 }
6722 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
6723 iounmap(vaddr);
6724
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006725 /* Reset the controller with a PCI power-cycle or via doorbell */
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02006726 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006727
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006728 /* -ENOTSUPP here means we cannot reset the controller
6729 * but it's already (and still) up and running in
Stephen M. Cameron18867652010-06-16 13:51:45 -05006730 * "performant mode". Or, it might be a 640x, which can't be reset
6731 * due to concerns about shared bbwc between the 6402/6404 pair.
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006732 */
Robert Elliottadf1b3a2015-01-23 16:42:01 -06006733 if (rc)
Tomas Henzl132aa222014-08-14 16:12:39 +02006734 goto out_disable;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006735
6736 /* Now try to get the controller to respond to a no-op */
Robert Elliott1ba66c92015-01-23 16:42:11 -06006737 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006738 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6739 if (hpsa_noop(pdev) == 0)
6740 break;
6741 else
6742 dev_warn(&pdev->dev, "no-op failed%s\n",
6743 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
6744 }
Tomas Henzl132aa222014-08-14 16:12:39 +02006745
6746out_disable:
6747
6748 pci_disable_device(pdev);
6749 return rc;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006750}
6751
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006752static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006753{
6754 h->cmd_pool_bits = kzalloc(
6755 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6756 sizeof(unsigned long), GFP_KERNEL);
6757 h->cmd_pool = pci_alloc_consistent(h->pdev,
6758 h->nr_cmds * sizeof(*h->cmd_pool),
6759 &(h->cmd_pool_dhandle));
6760 h->errinfo_pool = pci_alloc_consistent(h->pdev,
6761 h->nr_cmds * sizeof(*h->errinfo_pool),
6762 &(h->errinfo_pool_dhandle));
6763 if ((h->cmd_pool_bits == NULL)
6764 || (h->cmd_pool == NULL)
6765 || (h->errinfo_pool == NULL)) {
6766 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
Robert Elliott2c143342015-01-23 16:42:48 -06006767 goto clean_up;
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006768 }
6769 return 0;
Robert Elliott2c143342015-01-23 16:42:48 -06006770clean_up:
6771 hpsa_free_cmd_pool(h);
6772 return -ENOMEM;
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006773}
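/*
 * Sizing example for the allocations above (illustrative numbers):
 * with nr_cmds = 1024 and BITS_PER_LONG = 64, cmd_pool_bits is
 * DIV_ROUND_UP(1024, 64) = 16 longs, i.e. one allocation bit per
 * command, while cmd_pool and errinfo_pool are DMA-coherent arrays
 * of 1024 CommandList and ErrorInfo structures respectively.
 */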
6774
6775static void hpsa_free_cmd_pool(struct ctlr_info *h)
6776{
6777 kfree(h->cmd_pool_bits);
6778 if (h->cmd_pool)
6779 pci_free_consistent(h->pdev,
6780 h->nr_cmds * sizeof(struct CommandList),
6781 h->cmd_pool, h->cmd_pool_dhandle);
Stephen M. Cameronaca90122014-02-18 13:56:14 -06006782 if (h->ioaccel2_cmd_pool)
6783 pci_free_consistent(h->pdev,
6784 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6785 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006786 if (h->errinfo_pool)
6787 pci_free_consistent(h->pdev,
6788 h->nr_cmds * sizeof(struct ErrorInfo),
6789 h->errinfo_pool,
6790 h->errinfo_pool_dhandle);
Matt Gatese1f7de02014-02-18 13:55:17 -06006791 if (h->ioaccel_cmd_pool)
6792 pci_free_consistent(h->pdev,
6793 h->nr_cmds * sizeof(struct io_accel1_cmd),
6794 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006795}
6796
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05006797static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6798{
Fabian Frederickec429952015-01-23 16:41:46 -06006799 int i, cpu;
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05006800
6801 cpu = cpumask_first(cpu_online_mask);
6802 for (i = 0; i < h->msix_vector; i++) {
Fabian Frederickec429952015-01-23 16:41:46 -06006803 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05006804 cpu = cpumask_next(cpu, cpu_online_mask);
6805 }
6806}
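/*
 * The loop above hands out one online CPU per MSI-X vector in
 * ascending order; it cannot walk off the end of the mask because
 * hpsa_interrupt_mode() already clamped h->msix_vector to
 * num_online_cpus().
 */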
6807
Robert Elliottec501a12015-01-23 16:41:40 -06006808/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
6809static void hpsa_free_irqs(struct ctlr_info *h)
6810{
6811 int i;
6812
6813 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6814 /* Single reply queue, only one irq to free */
6815 i = h->intr_mode;
6816 irq_set_affinity_hint(h->intr[i], NULL);
6817 free_irq(h->intr[i], &h->q[i]);
6818 return;
6819 }
6820
6821 for (i = 0; i < h->msix_vector; i++) {
6822 irq_set_affinity_hint(h->intr[i], NULL);
6823 free_irq(h->intr[i], &h->q[i]);
6824 }
Robert Elliotta4e17fc2015-01-23 16:41:51 -06006825 for (; i < MAX_REPLY_QUEUES; i++)
6826 h->q[i] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06006827}
6828
Robert Elliott9ee61792015-01-23 16:42:32 -06006829/* returns 0 on success; cleans up and returns -Enn on error */
6830static int hpsa_request_irqs(struct ctlr_info *h,
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006831 irqreturn_t (*msixhandler)(int, void *),
6832 irqreturn_t (*intxhandler)(int, void *))
6833{
Matt Gates254f7962012-05-01 11:43:06 -05006834 int rc, i;
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006835
Matt Gates254f7962012-05-01 11:43:06 -05006836 /*
6837 * initialize h->q[x] = x so that interrupt handlers know which
6838 * queue to process.
6839 */
6840 for (i = 0; i < MAX_REPLY_QUEUES; i++)
6841 h->q[i] = (u8) i;
6842
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006843 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
Matt Gates254f7962012-05-01 11:43:06 -05006844 /* If performant mode and MSI-X, use multiple reply queues */
Robert Elliotta4e17fc2015-01-23 16:41:51 -06006845 for (i = 0; i < h->msix_vector; i++) {
Matt Gates254f7962012-05-01 11:43:06 -05006846 rc = request_irq(h->intr[i], msixhandler,
6847 0, h->devname,
6848 &h->q[i]);
Robert Elliotta4e17fc2015-01-23 16:41:51 -06006849 if (rc) {
6850 int j;
6851
6852 dev_err(&h->pdev->dev,
6853 "failed to get irq %d for %s\n",
6854 h->intr[i], h->devname);
6855 for (j = 0; j < i; j++) {
6856 free_irq(h->intr[j], &h->q[j]);
6857 h->q[j] = 0;
6858 }
6859 for (; j < MAX_REPLY_QUEUES; j++)
6860 h->q[j] = 0;
6861 return rc;
6862 }
6863 }
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05006864 hpsa_irq_affinity_hints(h);
Matt Gates254f7962012-05-01 11:43:06 -05006865 } else {
6866 /* Use single reply pool */
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006867 if (h->msix_vector > 0 || h->msi_vector) {
Matt Gates254f7962012-05-01 11:43:06 -05006868 rc = request_irq(h->intr[h->intr_mode],
6869 msixhandler, 0, h->devname,
6870 &h->q[h->intr_mode]);
6871 } else {
6872 rc = request_irq(h->intr[h->intr_mode],
6873 intxhandler, IRQF_SHARED, h->devname,
6874 &h->q[h->intr_mode]);
6875 }
6876 }
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006877 if (rc) {
6878 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6879 h->intr[h->intr_mode], h->devname);
6880 return -ENODEV;
6881 }
6882 return 0;
6883}
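/*
 * Summary of the three paths above: performant mode with MSI-X gets
 * one handler per reply queue; a single MSI or MSI-X vector gets one
 * non-shared handler; legacy INTx registers a shared handler, since
 * the interrupt line may be shared with other devices.
 */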
6884
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006885static int hpsa_kdump_soft_reset(struct ctlr_info *h)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006886{
6887 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6888 HPSA_RESET_TYPE_CONTROLLER)) {
6889 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6890 return -EIO;
6891 }
6892
6893 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6894 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6895 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6896 return -1;
6897 }
6898
6899 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6900 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
6901 dev_warn(&h->pdev->dev, "Board failed to become ready after soft reset.\n");
6903 return -1;
6904 }
6905
6906 return 0;
6907}
6908
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006909static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006910{
Robert Elliottec501a12015-01-23 16:41:40 -06006911 hpsa_free_irqs(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006912#ifdef CONFIG_PCI_MSI
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006913 if (h->msix_vector) {
6914 if (h->pdev->msix_enabled)
6915 pci_disable_msix(h->pdev);
6916 } else if (h->msi_vector) {
6917 if (h->pdev->msi_enabled)
6918 pci_disable_msi(h->pdev);
6919 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006920#endif /* CONFIG_PCI_MSI */
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006921}
6922
Stephen M. Cameron072b0512014-05-29 10:53:07 -05006923static void hpsa_free_reply_queues(struct ctlr_info *h)
6924{
6925 int i;
6926
6927 for (i = 0; i < h->nreply_queues; i++) {
6928 if (!h->reply_queue[i].head)
6929 continue;
6930 pci_free_consistent(h->pdev, h->reply_queue_size,
6931 h->reply_queue[i].head, h->reply_queue[i].busaddr);
6932 h->reply_queue[i].head = NULL;
6933 h->reply_queue[i].busaddr = 0;
6934 }
6935}
6936
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006937static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6938{
6939 hpsa_free_irqs_and_disable_msix(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006940 hpsa_free_sg_chain_blocks(h);
6941 hpsa_free_cmd_pool(h);
Matt Gatese1f7de02014-02-18 13:55:17 -06006942 kfree(h->ioaccel1_blockFetchTable);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006943 kfree(h->blockFetchTable);
Stephen M. Cameron072b0512014-05-29 10:53:07 -05006944 hpsa_free_reply_queues(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006945 if (h->vaddr)
6946 iounmap(h->vaddr);
6947 if (h->transtable)
6948 iounmap(h->transtable);
6949 if (h->cfgtable)
6950 iounmap(h->cfgtable);
Tomas Henzl132aa222014-08-14 16:12:39 +02006951 pci_disable_device(h->pdev);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006952 pci_release_regions(h->pdev);
6953 kfree(h);
6954}
6955
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006956/* Called when controller lockup detected. */
Don Bracef2405db2015-01-23 16:43:09 -06006957static void fail_all_outstanding_cmds(struct ctlr_info *h)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006958{
Webb Scales281a7fd2015-01-23 16:43:35 -06006959 int i, refcount;
6960 struct CommandList *c;
Webb Scales25163bd2015-04-23 09:32:00 -05006961 int failcount = 0;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006962
Don Brace080ef1c2015-01-23 16:43:25 -06006963 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
Don Bracef2405db2015-01-23 16:43:09 -06006964 for (i = 0; i < h->nr_cmds; i++) {
Don Bracef2405db2015-01-23 16:43:09 -06006965 c = h->cmd_pool + i;
Webb Scales281a7fd2015-01-23 16:43:35 -06006966 refcount = atomic_inc_return(&c->refcount);
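 /* refcount > 1 here means the slot holds a live command (our
 * increment plus the owner's reference), so fail it below. */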
6967 if (refcount > 1) {
Webb Scales25163bd2015-04-23 09:32:00 -05006968 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
Webb Scales281a7fd2015-01-23 16:43:35 -06006969 finish_cmd(c);
Stephen Cameron433b5f42015-04-23 09:32:11 -05006970 atomic_dec(&h->commands_outstanding);
Webb Scales25163bd2015-04-23 09:32:00 -05006971 failcount++;
Webb Scales281a7fd2015-01-23 16:43:35 -06006972 }
6973 cmd_free(h, c);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006974 }
Webb Scales25163bd2015-04-23 09:32:00 -05006975 dev_warn(&h->pdev->dev,
6976 "failed %d commands in fail_all\n", failcount);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006977}
6978
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006979static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6980{
Rusty Russellc8ed0012015-03-05 10:49:19 +10306981 int cpu;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006982
Rusty Russellc8ed0012015-03-05 10:49:19 +10306983 for_each_online_cpu(cpu) {
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006984 u32 *lockup_detected;
6985 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6986 *lockup_detected = value;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006987 }
6988 wmb(); /* be sure the per-cpu variables are out to memory */
6989}
6990
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006991static void controller_lockup_detected(struct ctlr_info *h)
6992{
6993 unsigned long flags;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006994 u32 lockup_detected;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006995
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006996 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6997 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006998 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6999 if (!lockup_detected) {
7000 /* no heartbeat, but controller gave us a zero. */
7001 dev_warn(&h->pdev->dev,
Webb Scales25163bd2015-04-23 09:32:00 -05007002 "lockup detected after %d seconds but scratchpad register is zero\n",
7003 h->heartbeat_sample_interval / HZ);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007004 lockup_detected = 0xffffffff;
7005 }
7006 set_lockup_detected_for_all_cpus(h, lockup_detected);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007007 spin_unlock_irqrestore(&h->lock, flags);
Webb Scales25163bd2015-04-23 09:32:00 -05007008 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
7009 lockup_detected, h->heartbeat_sample_interval / HZ);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007010 pci_disable_device(h->pdev);
Don Bracef2405db2015-01-23 16:43:09 -06007011 fail_all_outstanding_cmds(h);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007012}
7013
Webb Scales25163bd2015-04-23 09:32:00 -05007014static int detect_controller_lockup(struct ctlr_info *h)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007015{
7016 u64 now;
7017 u32 heartbeat;
7018 unsigned long flags;
7019
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007020 now = get_jiffies_64();
7021 /* If we've received an interrupt recently, we're ok. */
7022 if (time_after64(h->last_intr_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05007023 (h->heartbeat_sample_interval), now))
Webb Scales25163bd2015-04-23 09:32:00 -05007024 return false;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007025
7026 /*
7027 * If we've already checked the heartbeat recently, we're ok.
7028 * This could happen if someone sends us a signal. We
7029 * otherwise don't care about signals in this thread.
7030 */
7031 if (time_after64(h->last_heartbeat_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05007032 (h->heartbeat_sample_interval), now))
Webb Scales25163bd2015-04-23 09:32:00 -05007033 return false;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007034
7035 /* If heartbeat has not changed since we last looked, we're not ok. */
7036 spin_lock_irqsave(&h->lock, flags);
7037 heartbeat = readl(&h->cfgtable->HeartBeat);
7038 spin_unlock_irqrestore(&h->lock, flags);
7039 if (h->last_heartbeat == heartbeat) {
7040 controller_lockup_detected(h);
Webb Scales25163bd2015-04-23 09:32:00 -05007041 return true;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007042 }
7043
7044 /* We're ok. */
7045 h->last_heartbeat = heartbeat;
7046 h->last_heartbeat_timestamp = now;
Webb Scales25163bd2015-04-23 09:32:00 -05007047 return false;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007048}
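/*
 * Timing sketch (assuming the default HEARTBEAT_SAMPLE_INTERVAL): a
 * lockup is declared only when neither an interrupt nor a heartbeat
 * check has been seen for a full sample interval *and* the firmware
 * heartbeat counter has not advanced since the last read; a single
 * missed sample is never enough on its own.
 */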
7049
Stephen M. Cameron98465902014-02-21 16:25:00 -06007050static void hpsa_ack_ctlr_events(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007051{
7052 int i;
7053 char *event_type;
7054
Stephen Camerone4aa3e62015-01-23 16:44:07 -06007055 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7056 return;
7057
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007058 /* Ask the controller to clear the events we're handling. */
Stephen M. Cameron1f7cee82014-02-18 13:56:09 -06007059 if ((h->transMethod & (CFGTBL_Trans_io_accel1
7060 | CFGTBL_Trans_io_accel2)) &&
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007061 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7062 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7063
7064 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7065 event_type = "state change";
7066 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7067 event_type = "configuration change";
7068 /* Stop sending new RAID offload reqs via the IO accelerator */
7069 scsi_block_requests(h->scsi_host);
7070 for (i = 0; i < h->ndevices; i++)
7071 h->dev[i]->offload_enabled = 0;
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007072 hpsa_drain_accel_commands(h);
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007073 /* Set 'accelerator path config change' bit */
7074 dev_warn(&h->pdev->dev,
7075 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7076 h->events, event_type);
7077 writel(h->events, &(h->cfgtable->clear_event_notify));
7078 /* Set the "clear event notify field update" bit 6 */
7079 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7080 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7081 hpsa_wait_for_clear_event_notify_ack(h);
7082 scsi_unblock_requests(h->scsi_host);
7083 } else {
7084 /* Acknowledge controller notification events. */
7085 writel(h->events, &(h->cfgtable->clear_event_notify));
7086 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7087 hpsa_wait_for_clear_event_notify_ack(h);
7088#if 0
7089 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7090 hpsa_wait_for_mode_change_ack(h);
7091#endif
7092 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06007093 return;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007094}
7095
7096/* Check a register on the controller to see if there are configuration
7097 * changes (added/changed/removed logical drives, etc.) which mean that
Scott Teele863d682014-02-18 13:57:05 -06007098 * we should rescan the controller for devices.
7099 * Also check flag for driver-initiated rescan.
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007100 */
Stephen M. Cameron98465902014-02-21 16:25:00 -06007101static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007102{
7103 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
Stephen M. Cameron98465902014-02-21 16:25:00 -06007104 return 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007105
7106 h->events = readl(&(h->cfgtable->event_notify));
Stephen M. Cameron98465902014-02-21 16:25:00 -06007107 return h->events & RESCAN_REQUIRED_EVENT_BITS;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007108}
7109
Stephen M. Cameron98465902014-02-21 16:25:00 -06007110/*
7111 * Check if any of the offline devices have become ready
7112 */
7113static int hpsa_offline_devices_ready(struct ctlr_info *h)
7114{
7115 unsigned long flags;
7116 struct offline_device_entry *d;
7117 struct list_head *this, *tmp;
7118
7119 spin_lock_irqsave(&h->offline_device_lock, flags);
7120 list_for_each_safe(this, tmp, &h->offline_device_list) {
7121 d = list_entry(this, struct offline_device_entry,
7122 offline_list);
7123 spin_unlock_irqrestore(&h->offline_device_lock, flags);
Stephen M. Camerond1fea472014-07-03 10:17:58 -05007124 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7125 spin_lock_irqsave(&h->offline_device_lock, flags);
7126 list_del(&d->offline_list);
7127 spin_unlock_irqrestore(&h->offline_device_lock, flags);
Stephen M. Cameron98465902014-02-21 16:25:00 -06007128 return 1;
Stephen M. Camerond1fea472014-07-03 10:17:58 -05007129 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06007130 spin_lock_irqsave(&h->offline_device_lock, flags);
7131 }
7132 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7133 return 0;
7134}
7135
Don Brace6636e7f2015-01-23 16:45:17 -06007136static void hpsa_rescan_ctlr_worker(struct work_struct *work)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007137{
7138 unsigned long flags;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007139 struct ctlr_info *h = container_of(to_delayed_work(work),
Don Brace6636e7f2015-01-23 16:45:17 -06007140 struct ctlr_info, rescan_ctlr_work);
7141
7143 if (h->remove_in_progress)
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007144 return;
Stephen M. Cameron98465902014-02-21 16:25:00 -06007145
7146 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7147 scsi_host_get(h->scsi_host);
Stephen M. Cameron98465902014-02-21 16:25:00 -06007148 hpsa_ack_ctlr_events(h);
7149 hpsa_scan_start(h->scsi_host);
7150 scsi_host_put(h->scsi_host);
7151 }
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007152 spin_lock_irqsave(&h->lock, flags);
Don Brace6636e7f2015-01-23 16:45:17 -06007153 if (!h->remove_in_progress)
7154 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007155 h->heartbeat_sample_interval);
7156 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007157}
7158
Don Brace6636e7f2015-01-23 16:45:17 -06007159static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7160{
7161 unsigned long flags;
7162 struct ctlr_info *h = container_of(to_delayed_work(work),
7163 struct ctlr_info, monitor_ctlr_work);
7164
7165 detect_controller_lockup(h);
7166 if (lockup_detected(h))
7167 return;
7168
7169 spin_lock_irqsave(&h->lock, flags);
7170 if (!h->remove_in_progress)
7171 schedule_delayed_work(&h->monitor_ctlr_work,
7172 h->heartbeat_sample_interval);
7173 spin_unlock_irqrestore(&h->lock, flags);
7174}
7175
7176static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7177 char *name)
7178{
7179 struct workqueue_struct *wq = NULL;
Don Brace6636e7f2015-01-23 16:45:17 -06007180
Don Brace397ea9c2015-02-06 17:44:15 -06007181 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
Don Brace6636e7f2015-01-23 16:45:17 -06007182 if (!wq)
7183 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
7184
7185 return wq;
7186}
7187
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007188static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007189{
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007190 int dac, rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007191 struct ctlr_info *h;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007192 int try_soft_reset = 0;
7193 unsigned long flags;
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007194 u32 board_id;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007195
7196 if (number_of_controllers == 0)
7197 printk(KERN_INFO DRIVER_NAME "\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007198
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007199 rc = hpsa_lookup_board_id(pdev, &board_id);
7200 if (rc < 0) {
7201 dev_warn(&pdev->dev, "Board ID not found\n");
7202 return rc;
7203 }
7204
7205 rc = hpsa_init_reset_devices(pdev, board_id);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007206 if (rc) {
7207 if (rc != -ENOTSUPP)
7208 return rc;
7209 /* If the reset fails in a particular way (it has no way to do
7210 * a proper hard reset, so returns -ENOTSUPP) we can try to do
7211 * a soft reset once we get the controller configured up to the
7212 * point that it can accept a command.
7213 */
7214 try_soft_reset = 1;
7215 rc = 0;
7216 }
7217
7218reinit_after_soft_reset:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007219
Don Brace303932f2010-02-04 08:42:40 -06007220 /* Command structures must be aligned on a 32-byte boundary because
7221 * the 5 lower bits of the address are used by the hardware and by
7222 * the driver. See comments in hpsa.h for more info.
7223 */
Don Brace303932f2010-02-04 08:42:40 -06007224 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007225 h = kzalloc(sizeof(*h), GFP_KERNEL);
7226 if (!h)
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007227 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007228
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007229 h->pdev = pdev;
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06007230 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
Stephen M. Cameron98465902014-02-21 16:25:00 -06007231 INIT_LIST_HEAD(&h->offline_device_list);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007232 spin_lock_init(&h->lock);
Stephen M. Cameron98465902014-02-21 16:25:00 -06007233 spin_lock_init(&h->offline_device_lock);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007234 spin_lock_init(&h->scan_lock);
Don Brace34f0c622015-01-23 16:43:46 -06007235 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05007236 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007237
Don Brace6636e7f2015-01-23 16:45:17 -06007238 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
7239 if (!h->rescan_ctlr_wq) {
Don Brace080ef1c2015-01-23 16:43:25 -06007240 rc = -ENOMEM;
7241 goto clean1;
7242 }
Don Brace6636e7f2015-01-23 16:45:17 -06007243
7244 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
7245 if (!h->resubmit_wq) {
7246 rc = -ENOMEM;
7247 goto clean1;
7248 }
7249
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007250 /* Allocate and clear per-cpu variable lockup_detected */
7251 h->lockup_detected = alloc_percpu(u32);
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05007252 if (!h->lockup_detected) {
7253 rc = -ENOMEM;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007254 goto clean1;
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05007255 }
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007256 set_lockup_detected_for_all_cpus(h, 0);
7257
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007258 rc = hpsa_pci_init(h);
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007259 if (rc != 0)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007260 goto clean1;
7261
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06007262 sprintf(h->devname, HPSA "%d", number_of_controllers);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007263 h->ctlr = number_of_controllers;
7264 number_of_controllers++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007265
7266 /* configure the PCI DMA mask */
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007267 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7268 if (rc == 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007269 dac = 1;
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007270 } else {
7271 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7272 if (rc == 0) {
7273 dac = 0;
7274 } else {
7275 dev_err(&pdev->dev, "no suitable DMA available\n");
7276 goto clean1;
7277 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007278 }
7279
7280 /* make sure the board interrupts are off */
7281 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05007282
Robert Elliott9ee61792015-01-23 16:42:32 -06007283 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007284 goto clean2;
Don Brace303932f2010-02-04 08:42:40 -06007285 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
7286 h->devname, pdev->device,
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06007287 h->intr[h->intr_mode], dac ? "" : " not");
Robert Elliott8947fd12015-01-23 16:42:54 -06007288 rc = hpsa_allocate_cmd_pool(h);
7289 if (rc)
7290 goto clean2_and_free_irqs;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06007291 if (hpsa_allocate_sg_chain_blocks(h))
7292 goto clean4;
Stephen M. Camerona08a8472010-02-04 08:43:16 -06007293 init_waitqueue_head(&h->scan_wait_queue);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05007294 init_waitqueue_head(&h->abort_cmd_wait_queue);
Stephen M. Camerona08a8472010-02-04 08:43:16 -06007295 h->scan_finished = 1; /* no scan currently in progress */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007296
7297 pci_set_drvdata(pdev, h);
Stephen M. Cameron9a413382011-05-03 14:59:41 -05007298 h->ndevices = 0;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06007299 h->hba_mode_enabled = 0;
Stephen M. Cameron9a413382011-05-03 14:59:41 -05007300 h->scsi_host = NULL;
7301 spin_lock_init(&h->devlock);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007302 hpsa_put_ctlr_into_performant_mode(h);
7303
7304 /* At this point, the controller is ready to take commands.
7305 * Now, if reset_devices and the hard reset didn't work, try
7306 * the soft reset and see if that works.
7307 */
7308 if (try_soft_reset) {
7309
7310 /* This is kind of gross. We may or may not get a completion
7311 * from the soft reset command, and if we do, then the value
7312 * from the fifo may or may not be valid. So, we wait 10 secs
7313 * after the reset, throwing away any completions we get during
7314 * that time. Unregister the interrupt handler and register
7315 * fake ones to scoop up any residual completions.
7316 */
7317 spin_lock_irqsave(&h->lock, flags);
7318 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7319 spin_unlock_irqrestore(&h->lock, flags);
Robert Elliottec501a12015-01-23 16:41:40 -06007320 hpsa_free_irqs(h);
Robert Elliott9ee61792015-01-23 16:42:32 -06007321 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007322 hpsa_intx_discard_completions);
7323 if (rc) {
Robert Elliott9ee61792015-01-23 16:42:32 -06007324 dev_warn(&h->pdev->dev,
7325 "Failed to request_irq after soft reset.\n");
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007326 goto clean4;
7327 }
7328
7329 rc = hpsa_kdump_soft_reset(h);
7330 if (rc)
7331 /* Neither hard nor soft reset worked, we're hosed. */
7332 goto clean4;
7333
7334 dev_info(&h->pdev->dev, "Board READY.\n");
7335 dev_info(&h->pdev->dev,
7336 "Waiting for stale completions to drain.\n");
7337 h->access.set_intr_mask(h, HPSA_INTR_ON);
7338 msleep(10000);
7339 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7340
7341 rc = controller_reset_failed(h->cfgtable);
7342 if (rc)
7343 dev_info(&h->pdev->dev,
7344 "Soft reset appears to have failed.\n");
7345
7346 /* since the controller's reset, we have to go back and re-init
7347 * everything. Easiest to just forget what we've done and do it
7348 * all over again.
7349 */
7350 hpsa_undo_allocations_after_kdump_soft_reset(h);
7351 try_soft_reset = 0;
7352 if (rc)
7353 /* don't go to clean4, we already unallocated */
7354 return -ENODEV;
7355
7356 goto reinit_after_soft_reset;
7357 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007358
Stephen M. Cameron316b2212014-02-21 16:25:15 -06007359 /* Enable Accelerated IO path at driver layer */
7360 h->acciopath_status = 1;
Scott Teelda0697b2014-02-18 13:57:00 -06007361
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007363 /* Turn the interrupts on so we can service requests */
7364 h->access.set_intr_mask(h, HPSA_INTR_ON);
7365
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06007366 hpsa_hba_inquiry(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007367 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007368
7369 /* Monitor the controller for firmware lockups */
7370 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7371 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7372 schedule_delayed_work(&h->monitor_ctlr_work,
7373 h->heartbeat_sample_interval);
Don Brace6636e7f2015-01-23 16:45:17 -06007374 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
7375 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7376 h->heartbeat_sample_interval);
Stephen M. Cameron88bf6d62013-11-01 11:02:25 -05007377 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007378
7379clean4:
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06007380 hpsa_free_sg_chain_blocks(h);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007381 hpsa_free_cmd_pool(h);
Robert Elliott8947fd12015-01-23 16:42:54 -06007382clean2_and_free_irqs:
Robert Elliottec501a12015-01-23 16:41:40 -06007383 hpsa_free_irqs(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007384clean2:
7385clean1:
Don Brace080ef1c2015-01-23 16:43:25 -06007386 if (h->resubmit_wq)
7387 destroy_workqueue(h->resubmit_wq);
Don Brace6636e7f2015-01-23 16:45:17 -06007388 if (h->rescan_ctlr_wq)
7389 destroy_workqueue(h->rescan_ctlr_wq);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007390 if (h->lockup_detected)
7391 free_percpu(h->lockup_detected);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007392 kfree(h);
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007393 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007394}
7395
7396static void hpsa_flush_cache(struct ctlr_info *h)
7397{
7398 char *flush_buf;
7399 struct CommandList *c;
Webb Scales25163bd2015-04-23 09:32:00 -05007400 int rc;
Stephen M. Cameron702890e2013-09-23 13:33:30 -05007401
7402 /* Don't bother trying to flush the cache if locked up */
Webb Scales25163bd2015-04-23 09:32:00 -05007403 /* FIXME not necessary if do_simple_cmd does the check */
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007404 if (unlikely(lockup_detected(h)))
Stephen M. Cameron702890e2013-09-23 13:33:30 -05007405 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007406 flush_buf = kzalloc(4, GFP_KERNEL);
7407 if (!flush_buf)
7408 return;
7409
Stephen Cameron45fcb862015-01-23 16:43:04 -06007410 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007411 if (!c) {
Stephen Cameron45fcb862015-01-23 16:43:04 -06007412 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007413 goto out_of_memory;
7414 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06007415 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
7416 RAID_CTLR_LUNID, TYPE_CMD)) {
7417 goto out;
7418 }
Webb Scales25163bd2015-04-23 09:32:00 -05007419 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
7420 PCI_DMA_TODEVICE, NO_TIMEOUT);
7421 if (rc)
7422 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007423 if (c->err_info->CommandStatus != 0)
Stephen M. Camerona2dac132013-02-20 11:24:41 -06007424out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007425 dev_warn(&h->pdev->dev,
7426 "error flushing cache on controller\n");
Stephen Cameron45fcb862015-01-23 16:43:04 -06007427 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007428out_of_memory:
7429 kfree(flush_buf);
7430}
7431
7432static void hpsa_shutdown(struct pci_dev *pdev)
7433{
7434 struct ctlr_info *h;
7435
7436 h = pci_get_drvdata(pdev);
7437 /* Turn board interrupts off and send the flush cache command
7438 * so that all data in the battery-backed cache is written
7439 * out to disk before the controller powers down.
7440 */
7441 hpsa_flush_cache(h);
7442 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05007443 hpsa_free_irqs_and_disable_msix(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007444}
7445
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007446static void hpsa_free_device_info(struct ctlr_info *h)
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06007447{
7448 int i;
7449
7450 for (i = 0; i < h->ndevices; i++)
7451 kfree(h->dev[i]);
7452}
7453
static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	hpsa_free_reply_queues(h);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->ioaccel2_blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	free_percpu(h->lockup_detected);
	kfree(h);
}

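/*
 * Suspend/resume are deliberately unimplemented; returning -ENOSYS makes
 * any attempt to suspend a system with one of these controllers fail.
 */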
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets - 1; /* Default to the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
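/*
 * Worked example (illustrative numbers, not from any particular board):
 * with bucket[] = {5, 6, 8, 10, 12, 20, 28, 35} and min_blocks = 4, a
 * command with i = 3 SG entries needs 3 + 4 = 7 sixteen-byte blocks; the
 * first bucket >= 7 is bucket[2] = 8, so bucket_map[3] becomes 2.
 */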

/* Returns 0 on success, -ENODEV on error. */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
			CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to, to tell it the 8 different
	 * command sizes there may be.  It's a way of reducing the DMA
	 * done to fetch each command.  Encoded into each command's tag
	 * are 3 bits which communicate to the controller which of the
	 * eight sizes that command fits within.  The size of each command
	 * depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are
	 * programmed with the number of 16-byte blocks a command of that
	 * size requires.  The smallest command possible requires 5 such
	 * 16-byte blocks.  The largest command possible requires
	 * SG_ENTRIES_IN_CMD + 4 16-byte blocks.  Note, this only extends
	 * to the SG entries contained within the command block, and does
	 * not extend to chained blocks of SG elements.  bft[] contains
	 * the eight values we write to the registers.  They are not
	 * evenly distributed, but have more sizes for small commands,
	 * and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
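	/* With only 8 buckets, the chosen bucket index always fits in the
	 * 3 tag bits described above, which is what lets the fetch size
	 * travel in the low bits of the command address (see the
	 * calc_bucket_map() comment earlier).
	 */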

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/* Enable outbound interrupt coalescing in accelerator mode. */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
				ARRAY_SIZE(bft2) *
				sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}

static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
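	/* Illustration: a 128-byte-aligned bus address has its low 7 bits
	 * clear, which is what frees bits 0-6 of the command address for
	 * the hardware's use.  The check above keeps every entry of the
	 * (aligned) command pool on such a boundary by requiring the
	 * command size to be a multiple of IOACCEL1_COMMANDLIST_ALIGNMENT.
	 */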
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
			sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
	kfree(h->ioaccel1_blockFetchTable);
	return 1;
}

static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
			sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
				CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
			CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
			if (ioaccel2_alloc_cmds_and_bft(h))
				goto clean_up;
		}
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);
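	/* Each reply queue entry is a u64, so, for example, a controller
	 * reporting 1024 max commands ends up with a 1024 * 8 = 8 KB ring
	 * per reply queue (the 1024 here is illustrative only).
	 */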

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head)
			goto clean_up;
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable)
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	hpsa_free_reply_queues(h);
	kfree(h->blockFetchTable);
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

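/*
 * Wait for outstanding ioaccel commands to complete.  The probe below
 * leans on cmd_alloc()/cmd_free() reference counting: if the slot's
 * refcount was already nonzero before our atomic_inc_return() (i.e. the
 * returned value is > 1), the command is currently allocated, and the
 * cmd_free() call merely drops the extra reference taken here.
 */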
static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

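/*
 * Compile-time layout checks.  Each VERIFY_OFFSET() below expands to a
 * BUILD_BUG_ON(), so the build breaks if any member of these host/firmware
 * shared structures drifts from its expected byte offset.  For example, if
 * a field were ever inserted ahead of io_accel2_cmd.sg, its offset would no
 * longer be 64 and compilation would stop here.
 */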
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3); */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4); */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);