/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2014-2015 PMC-Sierra, Inc.
 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to storagedev@pmcs.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.10-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

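/*
 * These zeroed structures serve only as sentinel addresses: the driver
 * stores SCSI_CMD_BUSY/SCSI_CMD_IDLE in CommandList->scsi_cmd to mark a
 * command slot's state (compared by hpsa_is_cmd_idle() below); they are
 * never dereferenced as real SCSI commands.
 */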
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
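/*
 * VPD_PAGE is OR'd into the inquiry page code passed to fill_cmd(): bit 8
 * requests a Vital Product Data (EVPD) inquiry rather than a standard one,
 * while the low byte selects the VPD page itself.
 */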

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

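/*
 * In CISS LUN addressing, bits 7:6 of byte 3 give the address mode;
 * mode 01 (0x40) marks a logical volume, as opposed to a physical or
 * masked-physical device.
 */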
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0 0
#define HPSA_RAID_4 1
#define HPSA_RAID_1 2	/* also used for RAID 10 */
#define HPSA_RAID_5 3	/* also used for RAID 50 */
#define HPSA_RAID_51 4
#define HPSA_RAID_6 5	/* also used for RAID 60 */
#define HPSA_RAID_ADM 6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_lockup_detected,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.name = HPSA,
	.proc_name = HPSA,
	.queuecommand = hpsa_scsi_queue_command,
	.scan_start = hpsa_scan_start,
	.scan_finished = hpsa_scan_finished,
	.change_queue_depth = hpsa_change_queue_depth,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_abort_handler = hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl = hpsa_ioctl,
	.slave_alloc = hpsa_slave_alloc,
	.slave_configure = hpsa_slave_configure,
	.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

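	/*
	 * Each reply queue entry carries a toggle bit in bit 0.  It matches
	 * rq->wraparound only for entries posted during the current pass
	 * over the ring, so a stale entry left over from the previous pass
	 * reads as FIFO_EMPTY below.
	 */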
	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
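/*
 * For example (illustrative values only): a normal performant-mode command
 * whose SG count maps to block fetch table entry 3 gets its tag set by
 *	c->busaddr |= 1 | (3 << 1);
 * leaving the low bits 0b0111 -- performant bit set, fetch entry 3, and
 * command type 0 -- matching set_performant_mode() below.
 */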

/*
 * set_performant_mode: Modify the tag for cciss performant mode:
 * set bit 0 for pull model, bits 3-1 for the block fetch
 * register number.
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 * - performant mode bit (bit 0)
	 * - pull count (bits 1-3)
	 * - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 * - performant mode bit not used in ioaccel mode 2
	 * - pull count (bits 0-3)
	 * - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 * - performant mode bit not used in ioaccel mode 2
	 * - pull count (bits 0-3)
	 * - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

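/*
 * BMIC commands carry their subcommand in CDB byte 6, so a BMIC_WRITE
 * whose byte 6 is BMIC_FLASH_FIRMWARE identifies an in-progress
 * firmware flash.
 */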
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
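/*
 * In other words, while a flash is in progress the lockup detector samples
 * the heartbeat only every 240 seconds instead of the usual 30.
 */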
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			dev->raid_level > RAID_UNKNOWN ?
				"RAID-?" : raid_label[dev->raid_level],
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_state);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no., zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
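	/*
	 * For example (hypothetical addresses): if 01 00 00 00 00 00 00 00
	 * is already known as LUN 0 of an array, then 01 00 00 00 02 00 00 00
	 * differs only in byte 4 and becomes LUN 2 on the same bus/target.
	 */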
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
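	/*
	 * Offload is deliberately left disabled here; the desired state is
	 * recorded in offload_to_be_enabled and turned on later, once the
	 * device's phys_disk[] pointers can be updated (see the comment in
	 * hpsa_scsi_update_entry()).
	 */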
Robert Elliotta473d862015-04-23 09:32:54 -05001116 device->offload_to_be_enabled = device->offload_enabled;
1117 device->offload_enabled = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001118 return 0;
1119}
1120
Scott Teelbd9244f2012-01-19 14:01:30 -06001121/* Update an entry in h->dev[] array. */
1122static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
1123 int entry, struct hpsa_scsi_dev_t *new_entry)
1124{
Robert Elliotta473d862015-04-23 09:32:54 -05001125 int offload_enabled;
Scott Teelbd9244f2012-01-19 14:01:30 -06001126 /* assumes h->devlock is held */
1127 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1128
1129 /* Raid level changed. */
1130 h->dev[entry]->raid_level = new_entry->raid_level;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001131
Don Brace03383732015-01-23 16:43:30 -06001132 /* Raid offload parameters changed. Careful about the ordering. */
1133 if (new_entry->offload_config && new_entry->offload_enabled) {
1134 /*
1135 * if drive is newly offload_enabled, we want to copy the
1136 * raid map data first. If previously offload_enabled and
1137 * offload_config were set, raid map data had better be
1138 * the same as it was before. if raid map data is changed
1139 * then it had better be the case that
1140 * h->dev[entry]->offload_enabled is currently 0.
1141 */
1142 h->dev[entry]->raid_map = new_entry->raid_map;
1143 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
Don Brace03383732015-01-23 16:43:30 -06001144 }
Joe Handzika3144e02015-04-23 09:32:59 -05001145 if (new_entry->hba_ioaccel_enabled) {
1146 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1147 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1148 }
1149 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001150 h->dev[entry]->offload_config = new_entry->offload_config;
Stephen M. Cameron9fb0de22014-02-18 13:56:50 -06001151 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
Don Brace03383732015-01-23 16:43:30 -06001152 h->dev[entry]->queue_depth = new_entry->queue_depth;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001153
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001154 /*
1155 * We can turn off ioaccel offload now, but need to delay turning
1156 * it on until we can update h->dev[entry]->phys_disk[], but we
1157 * can't do that until all the devices are updated.
1158 */
1159 h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1160 if (!new_entry->offload_enabled)
1161 h->dev[entry]->offload_enabled = 0;
1162
Robert Elliotta473d862015-04-23 09:32:54 -05001163 offload_enabled = h->dev[entry]->offload_enabled;
1164 h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
Webb Scales0d96ef52015-04-23 09:31:55 -05001165 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
Robert Elliotta473d862015-04-23 09:32:54 -05001166 h->dev[entry]->offload_enabled = offload_enabled;
Scott Teelbd9244f2012-01-19 14:01:30 -06001167}
1168
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001169/* Replace an entry from h->dev[] array. */
1170static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
1171 int entry, struct hpsa_scsi_dev_t *new_entry,
1172 struct hpsa_scsi_dev_t *added[], int *nadded,
1173 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1174{
1175 /* assumes h->devlock is held */
Scott Teelcfe5bad2011-10-26 16:21:07 -05001176 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001177 removed[*nremoved] = h->dev[entry];
1178 (*nremoved)++;
Stephen M. Cameron01350d02011-08-09 08:18:01 -05001179
1180 /*
1181 * New physical devices won't have target/lun assigned yet
1182 * so we need to preserve the values in the slot we are replacing.
1183 */
1184 if (new_entry->target == -1) {
1185 new_entry->target = h->dev[entry]->target;
1186 new_entry->lun = h->dev[entry]->lun;
1187 }
1188
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001189 h->dev[entry] = new_entry;
1190 added[*nadded] = new_entry;
1191 (*nadded)++;
Webb Scales0d96ef52015-04-23 09:31:55 -05001192 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
Robert Elliotta473d862015-04-23 09:32:54 -05001193 new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1194 new_entry->offload_enabled = 0;
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001195}
1196
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001197/* Remove an entry from h->dev[] array. */
1198static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
1199 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1200{
1201 /* assumes h->devlock is held */
1202 int i;
1203 struct hpsa_scsi_dev_t *sd;
1204
Scott Teelcfe5bad2011-10-26 16:21:07 -05001205 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001206
1207 sd = h->dev[entry];
1208 removed[*nremoved] = h->dev[entry];
1209 (*nremoved)++;
1210
1211 for (i = entry; i < h->ndevices-1; i++)
1212 h->dev[i] = h->dev[i+1];
1213 h->ndevices--;
Webb Scales0d96ef52015-04-23 09:31:55 -05001214 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001215}
1216
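/*
 * Byte-for-byte equality test for two 8-byte SCSI-3 addresses,
 * comparing from byte 7 down to byte 0.
 */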
1217#define SCSI3ADDR_EQ(a, b) ( \
1218 (a)[7] == (b)[7] && \
1219 (a)[6] == (b)[6] && \
1220 (a)[5] == (b)[5] && \
1221 (a)[4] == (b)[4] && \
1222 (a)[3] == (b)[3] && \
1223 (a)[2] == (b)[2] && \
1224 (a)[1] == (b)[1] && \
1225 (a)[0] == (b)[0])
1226
1227static void fixup_botched_add(struct ctlr_info *h,
1228 struct hpsa_scsi_dev_t *added)
1229{
1230 /* called when scsi_add_device fails in order to re-adjust
1231 * h->dev[] to match the mid layer's view.
1232 */
1233 unsigned long flags;
1234 int i, j;
1235
1236 spin_lock_irqsave(&h->lock, flags);
1237 for (i = 0; i < h->ndevices; i++) {
1238 if (h->dev[i] == added) {
1239 for (j = i; j < h->ndevices-1; j++)
1240 h->dev[j] = h->dev[j+1];
1241 h->ndevices--;
1242 break;
1243 }
1244 }
1245 spin_unlock_irqrestore(&h->lock, flags);
1246 kfree(added);
1247}
1248
1249static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1250 struct hpsa_scsi_dev_t *dev2)
1251{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001252 /* we compare everything except lun and target as these
1253 * are not yet assigned. Compare parts likely
1254 * to differ first
1255 */
1256 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1257 sizeof(dev1->scsi3addr)) != 0)
1258 return 0;
1259 if (memcmp(dev1->device_id, dev2->device_id,
1260 sizeof(dev1->device_id)) != 0)
1261 return 0;
1262 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1263 return 0;
1264 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1265 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001266 if (dev1->devtype != dev2->devtype)
1267 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001268 if (dev1->bus != dev2->bus)
1269 return 0;
1270 return 1;
1271}
1272
Scott Teelbd9244f2012-01-19 14:01:30 -06001273static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1274 struct hpsa_scsi_dev_t *dev2)
1275{
1276 /* Device attributes that can change, but don't mean
1277 * that the device is a different device, nor that the OS
1278 * needs to be told anything about the change.
1279 */
1280 if (dev1->raid_level != dev2->raid_level)
1281 return 1;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001282 if (dev1->offload_config != dev2->offload_config)
1283 return 1;
1284 if (dev1->offload_enabled != dev2->offload_enabled)
1285 return 1;
Don Brace03383732015-01-23 16:43:30 -06001286 if (dev1->queue_depth != dev2->queue_depth)
1287 return 1;
Scott Teelbd9244f2012-01-19 14:01:30 -06001288 return 0;
1289}
1290
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001291/* Find needle in haystack. If exact match found, return DEVICE_SAME,
1292 * and return needle location in *index. If scsi3addr matches, but not
1293 * vendor, model, serial num, etc., return DEVICE_CHANGED, and return needle
Scott Teelbd9244f2012-01-19 14:01:30 -06001294 * location in *index.
1295 * In the case of a minor device attribute change, such as RAID level, just
1296 * return DEVICE_UPDATED, along with the updated device's location in *index.
1297 * If needle not found, return DEVICE_NOT_FOUND.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001298 */
1299static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1300 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1301 int *index)
1302{
1303 int i;
1304#define DEVICE_NOT_FOUND 0
1305#define DEVICE_CHANGED 1
1306#define DEVICE_SAME 2
Scott Teelbd9244f2012-01-19 14:01:30 -06001307#define DEVICE_UPDATED 3
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001308 for (i = 0; i < haystack_size; i++) {
Stephen M. Cameron23231042010-02-04 08:43:36 -06001309 if (haystack[i] == NULL) /* previously removed. */
1310 continue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001311 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1312 *index = i;
Scott Teelbd9244f2012-01-19 14:01:30 -06001313 if (device_is_the_same(needle, haystack[i])) {
1314 if (device_updated(needle, haystack[i]))
1315 return DEVICE_UPDATED;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001316 return DEVICE_SAME;
Scott Teelbd9244f2012-01-19 14:01:30 -06001317 } else {
Stephen M. Cameron98465902014-02-21 16:25:00 -06001318 /* Keep offline devices offline */
1319 if (needle->volume_offline)
1320 return DEVICE_NOT_FOUND;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001321 return DEVICE_CHANGED;
Scott Teelbd9244f2012-01-19 14:01:30 -06001322 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001323 }
1324 }
1325 *index = -1;
1326 return DEVICE_NOT_FOUND;
1327}
1328
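/*
 * Remember an offline volume on h->offline_device_list so it can be
 * polled later and brought online when it becomes ready; addresses
 * already on the list are not added twice.
 */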
Stephen M. Cameron98465902014-02-21 16:25:00 -06001329static void hpsa_monitor_offline_device(struct ctlr_info *h,
1330 unsigned char scsi3addr[])
1331{
1332 struct offline_device_entry *device;
1333 unsigned long flags;
1334
1335 /* Check to see if device is already on the list */
1336 spin_lock_irqsave(&h->offline_device_lock, flags);
1337 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1338 if (memcmp(device->scsi3addr, scsi3addr,
1339 sizeof(device->scsi3addr)) == 0) {
1340 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1341 return;
1342 }
1343 }
1344 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1345
1346 /* Device is not on the list, add it. */
1347 device = kmalloc(sizeof(*device), GFP_KERNEL);
1348 if (!device) {
1349 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1350 return;
1351 }
1352 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1353 spin_lock_irqsave(&h->offline_device_lock, flags);
1354 list_add_tail(&device->offline_list, &h->offline_device_list);
1355 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1356}
1357
1358/* Print a message explaining various offline volume states */
1359static void hpsa_show_volume_status(struct ctlr_info *h,
1360 struct hpsa_scsi_dev_t *sd)
1361{
1362 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1363 dev_info(&h->pdev->dev,
1364 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1365 h->scsi_host->host_no,
1366 sd->bus, sd->target, sd->lun);
1367 switch (sd->volume_offline) {
1368 case HPSA_LV_OK:
1369 break;
1370 case HPSA_LV_UNDERGOING_ERASE:
1371 dev_info(&h->pdev->dev,
1372 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1373 h->scsi_host->host_no,
1374 sd->bus, sd->target, sd->lun);
1375 break;
1376 case HPSA_LV_UNDERGOING_RPI:
1377 dev_info(&h->pdev->dev,
1378 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1379 h->scsi_host->host_no,
1380 sd->bus, sd->target, sd->lun);
1381 break;
1382 case HPSA_LV_PENDING_RPI:
1383 dev_info(&h->pdev->dev,
1384 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1385 h->scsi_host->host_no,
1386 sd->bus, sd->target, sd->lun);
1387 break;
1388 case HPSA_LV_ENCRYPTED_NO_KEY:
1389 dev_info(&h->pdev->dev,
1390 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1391 h->scsi_host->host_no,
1392 sd->bus, sd->target, sd->lun);
1393 break;
1394 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1395 dev_info(&h->pdev->dev,
1396 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1397 h->scsi_host->host_no,
1398 sd->bus, sd->target, sd->lun);
1399 break;
1400 case HPSA_LV_UNDERGOING_ENCRYPTION:
1401 dev_info(&h->pdev->dev,
1402 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1403 h->scsi_host->host_no,
1404 sd->bus, sd->target, sd->lun);
1405 break;
1406 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1407 dev_info(&h->pdev->dev,
1408 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1409 h->scsi_host->host_no,
1410 sd->bus, sd->target, sd->lun);
1411 break;
1412 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1413 dev_info(&h->pdev->dev,
1414 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1415 h->scsi_host->host_no,
1416 sd->bus, sd->target, sd->lun);
1417 break;
1418 case HPSA_LV_PENDING_ENCRYPTION:
1419 dev_info(&h->pdev->dev,
1420 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1421 h->scsi_host->host_no,
1422 sd->bus, sd->target, sd->lun);
1423 break;
1424 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1425 dev_info(&h->pdev->dev,
1426 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1427 h->scsi_host->host_no,
1428 sd->bus, sd->target, sd->lun);
1429 break;
1430 }
1431}
1432
Don Brace03383732015-01-23 16:43:30 -06001433/*
1434 * Figure the list of physical drive pointers for a logical drive with
1435 * raid offload configured.
1436 */
1437static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1438 struct hpsa_scsi_dev_t *dev[], int ndevices,
1439 struct hpsa_scsi_dev_t *logical_drive)
1440{
1441 struct raid_map_data *map = &logical_drive->raid_map;
1442 struct raid_map_disk_data *dd = &map->data[0];
1443 int i, j;
1444 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1445 le16_to_cpu(map->metadata_disks_per_row);
1446 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1447 le16_to_cpu(map->layout_map_count) *
1448 total_disks_per_row;
1449 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1450 total_disks_per_row;
1451 int qdepth;
1452
1453 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1454 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1455
Webb Scalesd604f532015-04-23 09:35:22 -05001456 logical_drive->nphysical_disks = nraid_map_entries;
1457
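	/*
	 * Walk the raid map, resolving each ioaccel handle to a physical
	 * disk and accumulating the logical drive's queue depth as the
	 * sum of its members' queue depths, capped at h->nr_cmds.
	 */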
Don Brace03383732015-01-23 16:43:30 -06001458 qdepth = 0;
1459 for (i = 0; i < nraid_map_entries; i++) {
1460 logical_drive->phys_disk[i] = NULL;
1461 if (!logical_drive->offload_config)
1462 continue;
1463 for (j = 0; j < ndevices; j++) {
1464 if (dev[j]->devtype != TYPE_DISK)
1465 continue;
1466 if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1467 continue;
1468 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1469 continue;
1470
1471 logical_drive->phys_disk[i] = dev[j];
1472 if (i < nphys_disk)
1473 qdepth = min(h->nr_cmds, qdepth +
1474 logical_drive->phys_disk[i]->queue_depth);
1475 break;
1476 }
1477
1478 /*
1479 * This can happen if a physical drive is removed and
1480 * the logical drive is degraded. In that case, the RAID
1481 * map data will refer to a physical disk which isn't actually
1482 * present. In that case offload_enabled should already
1483 * be 0, but we'll turn it off here just in case.
1484 */
1485 if (!logical_drive->phys_disk[i]) {
1486 logical_drive->offload_enabled = 0;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001487 logical_drive->offload_to_be_enabled = 0;
1488 logical_drive->queue_depth = 8;
Don Brace03383732015-01-23 16:43:30 -06001489 }
1490 }
1491 if (nraid_map_entries)
1492 /*
1493 * This is correct for reads, too high for full stripe writes,
1494 * way too high for partial stripe writes
1495 */
1496 logical_drive->queue_depth = qdepth;
1497 else
1498 logical_drive->queue_depth = h->nr_cmds;
1499}
1500
1501static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1502 struct hpsa_scsi_dev_t *dev[], int ndevices)
1503{
1504 int i;
1505
1506 for (i = 0; i < ndevices; i++) {
1507 if (dev[i]->devtype != TYPE_DISK)
1508 continue;
1509 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1510 continue;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001511
1512 /*
1513 * If offload is currently enabled, the RAID map and
1514 * phys_disk[] assignment had *better* not be changing,
1515 * and since they aren't changing, we do not need to
1516 * update them.
1517 */
1518 if (dev[i]->offload_enabled)
1519 continue;
1520
Don Brace03383732015-01-23 16:43:30 -06001521 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1522 }
1523}
1524
Stephen M. Cameron4967bd32010-02-04 08:41:49 -06001525static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001526 struct hpsa_scsi_dev_t *sd[], int nsds)
1527{
1528 /* sd contains scsi3 addresses and devtypes, and inquiry
1529 * data. This function takes what's in sd to be the current
1530 * reality and updates h->dev[] to reflect that reality.
1531 */
1532 int i, entry, device_change, changes = 0;
1533 struct hpsa_scsi_dev_t *csd;
1534 unsigned long flags;
1535 struct hpsa_scsi_dev_t **added, **removed;
1536 int nadded, nremoved;
1537 struct Scsi_Host *sh = NULL;
1538
Scott Teelcfe5bad2011-10-26 16:21:07 -05001539 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1540 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001541
1542 if (!added || !removed) {
1543 dev_warn(&h->pdev->dev, "out of memory in "
1544 "adjust_hpsa_scsi_table\n");
1545 goto free_and_out;
1546 }
1547
1548 spin_lock_irqsave(&h->devlock, flags);
1549
1550 /* find any devices in h->dev[] that are not in
1551 * sd[] and remove them from h->dev[], and for any
1552 * devices which have changed, remove the old device
1553 * info and add the new device info.
Scott Teelbd9244f2012-01-19 14:01:30 -06001554 * If minor device attributes change, just update
1555 * the existing device structure.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001556 */
1557 i = 0;
1558 nremoved = 0;
1559 nadded = 0;
1560 while (i < h->ndevices) {
1561 csd = h->dev[i];
1562 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1563 if (device_change == DEVICE_NOT_FOUND) {
1564 changes++;
1565 hpsa_scsi_remove_entry(h, hostno, i,
1566 removed, &nremoved);
1567 continue; /* remove ^^^, hence i not incremented */
1568 } else if (device_change == DEVICE_CHANGED) {
1569 changes++;
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001570 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1571 added, &nadded, removed, &nremoved);
Stephen M. Cameronc7f172d2010-02-04 08:43:31 -06001572 /* Set it to NULL to prevent it from being freed
1573 * at the bottom of hpsa_update_scsi_devices()
1574 */
1575 sd[entry] = NULL;
Scott Teelbd9244f2012-01-19 14:01:30 -06001576 } else if (device_change == DEVICE_UPDATED) {
1577 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001578 }
1579 i++;
1580 }
1581
1582 /* Now, make sure every device listed in sd[] is also
1583 * listed in h->dev[], adding them if they aren't found
1584 */
1585
1586 for (i = 0; i < nsds; i++) {
1587 if (!sd[i]) /* if already added above. */
1588 continue;
Stephen M. Cameron98465902014-02-21 16:25:00 -06001589
1590 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1591 * as the SCSI mid-layer does not handle such devices well.
1592 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1593 * at 160Hz, and prevents the system from coming up.
1594 */
1595 if (sd[i]->volume_offline) {
1596 hpsa_show_volume_status(h, sd[i]);
Webb Scales0d96ef52015-04-23 09:31:55 -05001597 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
Stephen M. Cameron98465902014-02-21 16:25:00 -06001598 continue;
1599 }
1600
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001601 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1602 h->ndevices, &entry);
1603 if (device_change == DEVICE_NOT_FOUND) {
1604 changes++;
1605 if (hpsa_scsi_add_entry(h, hostno, sd[i],
1606 added, &nadded) != 0)
1607 break;
1608 sd[i] = NULL; /* prevent from being freed later. */
1609 } else if (device_change == DEVICE_CHANGED) {
1610 /* should never happen... */
1611 changes++;
1612 dev_warn(&h->pdev->dev,
1613 "device unexpectedly changed.\n");
1614 /* but if it does happen, we just ignore that device */
1615 }
1616 }
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001617 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1618
1619 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1620 * any logical drives that need it enabled.
1621 */
1622 for (i = 0; i < h->ndevices; i++)
1623 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1624
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001625 spin_unlock_irqrestore(&h->devlock, flags);
1626
Stephen M. Cameron98465902014-02-21 16:25:00 -06001627 /* Monitor devices which are in one of several NOT READY states to be
1628 * brought online later. This must be done without holding h->devlock,
1629 * so don't touch h->dev[]
1630 */
1631 for (i = 0; i < nsds; i++) {
1632 if (!sd[i]) /* if already added above. */
1633 continue;
1634 if (sd[i]->volume_offline)
1635 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1636 }
1637
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001638	/* Don't notify the scsi mid layer of any changes the first time
1639 * through, or if there are no changes; scsi_scan_host will handle
1640 * the initial scan.
1641 */
1642 if (hostno == -1 || !changes)
1643 goto free_and_out;
1644
1645 sh = h->scsi_host;
1646 /* Notify scsi mid layer of any removed devices */
1647 for (i = 0; i < nremoved; i++) {
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001648 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1649 struct scsi_device *sdev =
1650 scsi_device_lookup(sh, removed[i]->bus,
1651 removed[i]->target, removed[i]->lun);
1652 if (sdev != NULL) {
1653 scsi_remove_device(sdev);
1654 scsi_device_put(sdev);
1655 } else {
1656 /*
1657 * We don't expect to get here.
1658 * Future cmds to this device will get a selection
1659 * timeout as if the device were gone.
1660 */
Webb Scales0d96ef52015-04-23 09:31:55 -05001661 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1662 "didn't find device for removal.");
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001663 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001664 }
1665 kfree(removed[i]);
1666 removed[i] = NULL;
1667 }
1668
1669 /* Notify scsi mid layer of any added devices */
1670 for (i = 0; i < nadded; i++) {
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001671 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1672 continue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001673 if (scsi_add_device(sh, added[i]->bus,
1674 added[i]->target, added[i]->lun) == 0)
1675 continue;
Webb Scales0d96ef52015-04-23 09:31:55 -05001676 hpsa_show_dev_msg(KERN_WARNING, h, added[i],
1677 "addition failed, device not added.");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001678 /* now we have to remove it from h->dev,
1679 * since it didn't get added to scsi mid layer
1680 */
1681 fixup_botched_add(h, added[i]);
Robert Elliott105a3db2015-04-23 09:33:48 -05001682 added[i] = NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001683 }
1684
1685free_and_out:
1686 kfree(added);
1687 kfree(removed);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001688}
1689
1690/*
Joe Perches9e03aa22013-09-03 13:45:58 -07001691 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001692 * Assume's h->devlock is held.
1693 */
1694static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1695 int bus, int target, int lun)
1696{
1697 int i;
1698 struct hpsa_scsi_dev_t *sd;
1699
1700 for (i = 0; i < h->ndevices; i++) {
1701 sd = h->dev[i];
1702 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1703 return sd;
1704 }
1705 return NULL;
1706}
1707
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001708static int hpsa_slave_alloc(struct scsi_device *sdev)
1709{
1710 struct hpsa_scsi_dev_t *sd;
1711 unsigned long flags;
1712 struct ctlr_info *h;
1713
1714 h = sdev_to_hba(sdev);
1715 spin_lock_irqsave(&h->devlock, flags);
1716 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1717 sdev_id(sdev), sdev->lun);
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001718 if (likely(sd)) {
Don Brace03383732015-01-23 16:43:30 -06001719 atomic_set(&sd->ioaccel_cmds_out, 0);
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001720 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1721 } else
1722 sdev->hostdata = NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001723 spin_unlock_irqrestore(&h->devlock, flags);
1724 return 0;
1725}
1726
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001727/* configure scsi device based on internal per-device structure */
1728static int hpsa_slave_configure(struct scsi_device *sdev)
1729{
1730 struct hpsa_scsi_dev_t *sd;
1731 int queue_depth;
1732
1733 sd = sdev->hostdata;
1734 sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1735
1736 if (sd)
1737 queue_depth = sd->queue_depth != 0 ?
1738 sd->queue_depth : sdev->host->can_queue;
1739 else
1740 queue_depth = sdev->host->can_queue;
1741
1742 scsi_change_queue_depth(sdev, queue_depth);
1743
1744 return 0;
1745}
1746
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001747static void hpsa_slave_destroy(struct scsi_device *sdev)
1748{
Stephen M. Cameronbcc442552010-02-04 08:41:54 -06001749 /* nothing to do. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001750}
1751
Webb Scalesd9a729f2015-04-23 09:33:27 -05001752static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1753{
1754 int i;
1755
1756 if (!h->ioaccel2_cmd_sg_list)
1757 return;
1758 for (i = 0; i < h->nr_cmds; i++) {
1759 kfree(h->ioaccel2_cmd_sg_list[i]);
1760 h->ioaccel2_cmd_sg_list[i] = NULL;
1761 }
1762 kfree(h->ioaccel2_cmd_sg_list);
1763 h->ioaccel2_cmd_sg_list = NULL;
1764}
1765
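/*
 * Allocate one ioaccel2 SG chain block per command, each sized for
 * h->maxsgentries scatter-gather elements.
 */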
1766static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1767{
1768 int i;
1769
1770 if (h->chainsize <= 0)
1771 return 0;
1772
1773 h->ioaccel2_cmd_sg_list =
1774 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1775 GFP_KERNEL);
1776 if (!h->ioaccel2_cmd_sg_list)
1777 return -ENOMEM;
1778 for (i = 0; i < h->nr_cmds; i++) {
1779 h->ioaccel2_cmd_sg_list[i] =
1780 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1781 h->maxsgentries, GFP_KERNEL);
1782 if (!h->ioaccel2_cmd_sg_list[i])
1783 goto clean;
1784 }
1785 return 0;
1786
1787clean:
1788 hpsa_free_ioaccel2_sg_chain_blocks(h);
1789 return -ENOMEM;
1790}
1791
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001792static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1793{
1794 int i;
1795
1796 if (!h->cmd_sg_list)
1797 return;
1798 for (i = 0; i < h->nr_cmds; i++) {
1799 kfree(h->cmd_sg_list[i]);
1800 h->cmd_sg_list[i] = NULL;
1801 }
1802 kfree(h->cmd_sg_list);
1803 h->cmd_sg_list = NULL;
1804}
1805
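/*
 * Allocate a per-command SG chain block (h->chainsize entries each)
 * for requests whose scatter-gather lists spill past the descriptors
 * embedded in the command itself.
 */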
Robert Elliott105a3db2015-04-23 09:33:48 -05001806static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001807{
1808 int i;
1809
1810 if (h->chainsize <= 0)
1811 return 0;
1812
1813 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1814 GFP_KERNEL);
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001815 if (!h->cmd_sg_list) {
1816 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001817 return -ENOMEM;
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001818 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001819 for (i = 0; i < h->nr_cmds; i++) {
1820 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1821 h->chainsize, GFP_KERNEL);
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001822 if (!h->cmd_sg_list[i]) {
1823 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001824 goto clean;
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001825 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001826 }
1827 return 0;
1828
1829clean:
1830 hpsa_free_sg_chain_blocks(h);
1831 return -ENOMEM;
1832}
1833
Webb Scalesd9a729f2015-04-23 09:33:27 -05001834static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
1835 struct io_accel2_cmd *cp, struct CommandList *c)
1836{
1837 struct ioaccel2_sg_element *chain_block;
1838 u64 temp64;
1839 u32 chain_size;
1840
1841 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
1842 chain_size = le32_to_cpu(cp->data_len);
1843 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
1844 PCI_DMA_TODEVICE);
1845 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1846 /* prevent subsequent unmapping */
1847 cp->sg->address = 0;
1848 return -1;
1849 }
1850 cp->sg->address = cpu_to_le64(temp64);
1851 return 0;
1852}
1853
1854static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
1855 struct io_accel2_cmd *cp)
1856{
1857 struct ioaccel2_sg_element *chain_sg;
1858 u64 temp64;
1859 u32 chain_size;
1860
1861 chain_sg = cp->sg;
1862 temp64 = le64_to_cpu(chain_sg->address);
1863 chain_size = le32_to_cpu(cp->data_len);
1864 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
1865}
1866
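/*
 * DMA-map the spill-over SG chain block and point the last embedded
 * SG descriptor of the command at it.
 */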
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001867static int hpsa_map_sg_chain_block(struct ctlr_info *h,
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001868 struct CommandList *c)
1869{
1870 struct SGDescriptor *chain_sg, *chain_block;
1871 u64 temp64;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001872 u32 chain_len;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001873
1874 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1875 chain_block = h->cmd_sg_list[c->cmdindex];
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001876 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1877 chain_len = sizeof(*chain_sg) *
Don Brace2b08b3e2015-01-23 16:41:09 -06001878 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001879 chain_sg->Len = cpu_to_le32(chain_len);
1880 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001881 PCI_DMA_TODEVICE);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001882 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1883 /* prevent subsequent unmapping */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001884 chain_sg->Addr = cpu_to_le64(0);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001885 return -1;
1886 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001887 chain_sg->Addr = cpu_to_le64(temp64);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001888 return 0;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001889}
1890
1891static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1892 struct CommandList *c)
1893{
1894 struct SGDescriptor *chain_sg;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001895
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001896 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001897 return;
1898
1899 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001900 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1901 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001902}
1903
Scott Teela09c1442014-02-18 13:57:21 -06001904
1905/* Decode the various types of errors on the ioaccel2 path.
1906 * Return 1 for any error that should generate a RAID path retry.
1907 * Return 0 for errors that don't require a RAID path retry.
1908 */
1909static int handle_ioaccel_mode2_error(struct ctlr_info *h,
Scott Teelc3497752014-02-18 13:56:34 -06001910 struct CommandList *c,
1911 struct scsi_cmnd *cmd,
1912 struct io_accel2_cmd *c2)
1913{
1914 int data_len;
Scott Teela09c1442014-02-18 13:57:21 -06001915 int retry = 0;
Joe Handzikc40820d2015-04-23 09:33:32 -05001916 u32 ioaccel2_resid = 0;
Scott Teelc3497752014-02-18 13:56:34 -06001917
1918 switch (c2->error_data.serv_response) {
1919 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1920 switch (c2->error_data.status) {
1921 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1922 break;
1923 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001924 cmd->result |= SAM_STAT_CHECK_CONDITION;
Scott Teelc3497752014-02-18 13:56:34 -06001925 if (c2->error_data.data_present !=
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001926 IOACCEL2_SENSE_DATA_PRESENT) {
1927 memset(cmd->sense_buffer, 0,
1928 SCSI_SENSE_BUFFERSIZE);
Scott Teelc3497752014-02-18 13:56:34 -06001929 break;
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001930 }
Scott Teelc3497752014-02-18 13:56:34 -06001931 /* copy the sense data */
1932 data_len = c2->error_data.sense_data_len;
1933 if (data_len > SCSI_SENSE_BUFFERSIZE)
1934 data_len = SCSI_SENSE_BUFFERSIZE;
1935 if (data_len > sizeof(c2->error_data.sense_data_buff))
1936 data_len =
1937 sizeof(c2->error_data.sense_data_buff);
1938 memcpy(cmd->sense_buffer,
1939 c2->error_data.sense_data_buff, data_len);
Scott Teela09c1442014-02-18 13:57:21 -06001940 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001941 break;
1942 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
Scott Teela09c1442014-02-18 13:57:21 -06001943 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001944 break;
1945 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
Scott Teela09c1442014-02-18 13:57:21 -06001946 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001947 break;
1948 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
Stephen Cameron4a8da222015-04-23 09:32:43 -05001949 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001950 break;
1951 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
Scott Teela09c1442014-02-18 13:57:21 -06001952 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001953 break;
1954 default:
Scott Teela09c1442014-02-18 13:57:21 -06001955 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001956 break;
1957 }
1958 break;
1959 case IOACCEL2_SERV_RESPONSE_FAILURE:
Joe Handzikc40820d2015-04-23 09:33:32 -05001960 switch (c2->error_data.status) {
1961 case IOACCEL2_STATUS_SR_IO_ERROR:
1962 case IOACCEL2_STATUS_SR_IO_ABORTED:
1963 case IOACCEL2_STATUS_SR_OVERRUN:
1964 retry = 1;
1965 break;
1966 case IOACCEL2_STATUS_SR_UNDERRUN:
1967 cmd->result = (DID_OK << 16); /* host byte */
1968 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1969 ioaccel2_resid = get_unaligned_le32(
1970 &c2->error_data.resid_cnt[0]);
1971 scsi_set_resid(cmd, ioaccel2_resid);
1972 break;
1973 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
1974 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
1975 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
1976 /* We will get an event from ctlr to trigger rescan */
1977 retry = 1;
1978 break;
1979 default:
1980 retry = 1;
Joe Handzikc40820d2015-04-23 09:33:32 -05001981 }
Scott Teelc3497752014-02-18 13:56:34 -06001982 break;
1983 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1984 break;
1985 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1986 break;
1987 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
Scott Teela09c1442014-02-18 13:57:21 -06001988 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001989 break;
1990 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
Scott Teelc3497752014-02-18 13:56:34 -06001991 break;
1992 default:
Scott Teela09c1442014-02-18 13:57:21 -06001993 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001994 break;
1995 }
Scott Teela09c1442014-02-18 13:57:21 -06001996
1997 return retry; /* retry on raid path? */
Scott Teelc3497752014-02-18 13:56:34 -06001998}
1999
Webb Scalesa58e7e52015-04-23 09:34:16 -05002000static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2001 struct CommandList *c)
2002{
Webb Scalesd604f532015-04-23 09:35:22 -05002003 bool do_wake = false;
2004
Webb Scalesa58e7e52015-04-23 09:34:16 -05002005 /*
2006 * Prevent the following race in the abort handler:
2007 *
2008 * 1. LLD is requested to abort a SCSI command
2009 * 2. The SCSI command completes
2010 * 3. The struct CommandList associated with step 2 is made available
2011 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2012 * 5. Abort handler follows scsi_cmnd->host_scribble and
2013 * finds struct CommandList and tries to abort it.
2014 * Now we have aborted the wrong command.
2015 *
Webb Scalesd604f532015-04-23 09:35:22 -05002016 * Reset c->scsi_cmd here so that the abort or reset handler will know
2017 * this command has completed. Then, check to see if the handler is
Webb Scalesa58e7e52015-04-23 09:34:16 -05002018 * waiting for this command, and, if so, wake it.
2019 */
2020 c->scsi_cmd = SCSI_CMD_IDLE;
Webb Scalesd604f532015-04-23 09:35:22 -05002021 mb(); /* Declare command idle before checking for pending events. */
Webb Scalesa58e7e52015-04-23 09:34:16 -05002022 if (c->abort_pending) {
Webb Scalesd604f532015-04-23 09:35:22 -05002023 do_wake = true;
Webb Scalesa58e7e52015-04-23 09:34:16 -05002024 c->abort_pending = false;
Webb Scalesa58e7e52015-04-23 09:34:16 -05002025 }
Webb Scalesd604f532015-04-23 09:35:22 -05002026 if (c->reset_pending) {
2027 unsigned long flags;
2028 struct hpsa_scsi_dev_t *dev;
2029
2030 /*
2031 * There appears to be a reset pending; take the lock and
2032 * re-check. If the reset is still pending, decrement the count of
2033 * outstanding commands and wake the reset command if this is the last one.
2034 */
2035 spin_lock_irqsave(&h->lock, flags);
2036 dev = c->reset_pending; /* Re-fetch under the lock. */
2037 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2038 do_wake = true;
2039 c->reset_pending = NULL;
2040 spin_unlock_irqrestore(&h->lock, flags);
2041 }
2042
2043 if (do_wake)
2044 wake_up_all(&h->event_sync_wait_queue);
Webb Scalesa58e7e52015-04-23 09:34:16 -05002045}
2046
Webb Scales73153fe2015-04-23 09:35:04 -05002047static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2048 struct CommandList *c)
2049{
2050 hpsa_cmd_resolve_events(h, c);
2051 cmd_tagged_free(h, c);
2052}
2053
Webb Scales8a0ff922015-04-23 09:34:11 -05002054static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2055 struct CommandList *c, struct scsi_cmnd *cmd)
2056{
Webb Scales73153fe2015-04-23 09:35:04 -05002057 hpsa_cmd_resolve_and_free(h, c);
Webb Scales8a0ff922015-04-23 09:34:11 -05002058 cmd->scsi_done(cmd);
2059}
2060
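/* Resubmit the command via h->resubmit_wq on the current CPU. */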
2061static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2062{
2063 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2064 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2065}
2066
Webb Scalesa58e7e52015-04-23 09:34:16 -05002067static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2068{
2069 cmd->result = DID_ABORT << 16;
2070}
2071
2072static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2073 struct scsi_cmnd *cmd)
2074{
2075 hpsa_set_scsi_cmd_aborted(cmd);
2076 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2077 c->Request.CDB, c->err_info->ScsiStatus);
Webb Scales73153fe2015-04-23 09:35:04 -05002078 hpsa_cmd_resolve_and_free(h, c);
Webb Scalesa58e7e52015-04-23 09:34:16 -05002079}
2080
Scott Teelc3497752014-02-18 13:56:34 -06002081static void process_ioaccel2_completion(struct ctlr_info *h,
2082 struct CommandList *c, struct scsi_cmnd *cmd,
2083 struct hpsa_scsi_dev_t *dev)
2084{
2085 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2086
2087 /* check for good status */
2088 if (likely(c2->error_data.serv_response == 0 &&
Webb Scales8a0ff922015-04-23 09:34:11 -05002089 c2->error_data.status == 0))
2090 return hpsa_cmd_free_and_done(h, c, cmd);
Scott Teelc3497752014-02-18 13:56:34 -06002091
Webb Scales8a0ff922015-04-23 09:34:11 -05002092 /*
2093 * Any RAID offload error results in retry which will use
Scott Teelc3497752014-02-18 13:56:34 -06002094 * the normal I/O path so the controller can handle whatever's
2095 * wrong.
2096 */
2097 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
2098 c2->error_data.serv_response ==
2099 IOACCEL2_SERV_RESPONSE_FAILURE) {
Don Brace080ef1c2015-01-23 16:43:25 -06002100 if (c2->error_data.status ==
2101 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2102 dev->offload_enabled = 0;
Webb Scales8a0ff922015-04-23 09:34:11 -05002103
2104 return hpsa_retry_cmd(h, c);
Scott Teelc3497752014-02-18 13:56:34 -06002105 }
Don Brace080ef1c2015-01-23 16:43:25 -06002106
2107 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
Webb Scales8a0ff922015-04-23 09:34:11 -05002108 return hpsa_retry_cmd(h, c);
Don Brace080ef1c2015-01-23 16:43:25 -06002109
Webb Scales8a0ff922015-04-23 09:34:11 -05002110 return hpsa_cmd_free_and_done(h, c, cmd);
Scott Teelc3497752014-02-18 13:56:34 -06002111}
2112
Stephen Cameron9437ac42015-04-23 09:32:16 -05002113/* Returns 0 on success, < 0 otherwise. */
2114static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2115 struct CommandList *cp)
2116{
2117 u8 tmf_status = cp->err_info->ScsiStatus;
2118
2119 switch (tmf_status) {
2120 case CISS_TMF_COMPLETE:
2121 /*
2122 * CISS_TMF_COMPLETE never happens; instead,
2123 * ei->CommandStatus == 0 in this case.
2124 */
2125 case CISS_TMF_SUCCESS:
2126 return 0;
2127 case CISS_TMF_INVALID_FRAME:
2128 case CISS_TMF_NOT_SUPPORTED:
2129 case CISS_TMF_FAILED:
2130 case CISS_TMF_WRONG_LUN:
2131 case CISS_TMF_OVERLAPPED_TAG:
2132 break;
2133 default:
2134 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2135 tmf_status);
2136 break;
2137 }
2138 return -tmf_status;
2139}
2140
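/*
 * Handle completion of a SCSI command: undo the DMA mappings, translate
 * the controller's (or ioaccel path's) completion status into a SCSI
 * midlayer result, and either finish, retry, or resolve the command.
 */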
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05002141static void complete_scsi_command(struct CommandList *cp)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002142{
2143 struct scsi_cmnd *cmd;
2144 struct ctlr_info *h;
2145 struct ErrorInfo *ei;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002146 struct hpsa_scsi_dev_t *dev;
Webb Scalesd9a729f2015-04-23 09:33:27 -05002147 struct io_accel2_cmd *c2;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002148
Stephen Cameron9437ac42015-04-23 09:32:16 -05002149 u8 sense_key;
2150 u8 asc; /* additional sense code */
2151 u8 ascq; /* additional sense code qualifier */
Stephen M. Camerondb111e12011-06-03 09:57:34 -05002152 unsigned long sense_data_size;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002153
2154 ei = cp->err_info;
Stephen Cameron7fa30302015-01-23 16:44:30 -06002155 cmd = cp->scsi_cmd;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002156 h = cp->h;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002157 dev = cmd->device->hostdata;
Webb Scalesd9a729f2015-04-23 09:33:27 -05002158 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002159
2160 scsi_dma_unmap(cmd); /* undo the DMA mappings */
Matt Gatese1f7de02014-02-18 13:55:17 -06002161 if ((cp->cmd_type == CMD_SCSI) &&
Don Brace2b08b3e2015-01-23 16:41:09 -06002162 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002163 hpsa_unmap_sg_chain_block(h, cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002164
Webb Scalesd9a729f2015-04-23 09:33:27 -05002165 if ((cp->cmd_type == CMD_IOACCEL2) &&
2166 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2167 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2168
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002169 cmd->result = (DID_OK << 16); /* host byte */
2170 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
Scott Teelc3497752014-02-18 13:56:34 -06002171
Don Brace03383732015-01-23 16:43:30 -06002172 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2173 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2174
Webb Scales25163bd2015-04-23 09:32:00 -05002175 /*
2176 * We check for lockup status here as it may be set for
2177 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2178 * fail_all_outstanding_cmds()
2179 */
2180 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2181 /* DID_NO_CONNECT will prevent a retry */
2182 cmd->result = DID_NO_CONNECT << 16;
Webb Scales8a0ff922015-04-23 09:34:11 -05002183 return hpsa_cmd_free_and_done(h, cp, cmd);
Webb Scales25163bd2015-04-23 09:32:00 -05002184 }
2185
Webb Scalesd604f532015-04-23 09:35:22 -05002186 if ((unlikely(hpsa_is_pending_event(cp)))) {
2187 if (cp->reset_pending)
2188 return hpsa_cmd_resolve_and_free(h, cp);
2189 if (cp->abort_pending)
2190 return hpsa_cmd_abort_and_free(h, cp, cmd);
2191 }
2192
Scott Teelc3497752014-02-18 13:56:34 -06002193 if (cp->cmd_type == CMD_IOACCEL2)
2194 return process_ioaccel2_completion(h, cp, cmd, dev);
2195
Robert Elliott6aa4c362014-07-03 10:18:19 -05002196 scsi_set_resid(cmd, ei->ResidualCnt);
Webb Scales8a0ff922015-04-23 09:34:11 -05002197 if (ei->CommandStatus == 0)
2198 return hpsa_cmd_free_and_done(h, cp, cmd);
Robert Elliott6aa4c362014-07-03 10:18:19 -05002199
Matt Gatese1f7de02014-02-18 13:55:17 -06002200 /* For I/O accelerator commands, copy over some fields to the normal
2201 * CISS header used below for error handling.
2202 */
2203 if (cp->cmd_type == CMD_IOACCEL1) {
2204 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
Don Brace2b08b3e2015-01-23 16:41:09 -06002205 cp->Header.SGList = scsi_sg_count(cmd);
2206 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2207 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2208 IOACCEL1_IOFLAGS_CDBLEN_MASK;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002209 cp->Header.tag = c->tag;
Matt Gatese1f7de02014-02-18 13:55:17 -06002210 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2211 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002212
2213 /* Any RAID offload error results in retry which will use
2214 * the normal I/O path so the controller can handle whatever's
2215 * wrong.
2216 */
2217 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
2218 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2219 dev->offload_enabled = 0;
Webb Scalesd604f532015-04-23 09:35:22 -05002220 return hpsa_retry_cmd(h, cp);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002221 }
Matt Gatese1f7de02014-02-18 13:55:17 -06002222 }
2223
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002224 /* an error has occurred */
2225 switch (ei->CommandStatus) {
2226
2227 case CMD_TARGET_STATUS:
Stephen Cameron9437ac42015-04-23 09:32:16 -05002228 cmd->result |= ei->ScsiStatus;
2229 /* copy the sense data */
2230 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2231 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2232 else
2233 sense_data_size = sizeof(ei->SenseInfo);
2234 if (ei->SenseLen < sense_data_size)
2235 sense_data_size = ei->SenseLen;
2236 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2237 if (ei->ScsiStatus)
2238 decode_sense_data(ei->SenseInfo, sense_data_size,
2239 &sense_key, &asc, &ascq);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002240 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
Matt Gates1d3b3602010-02-04 08:43:00 -06002241 if (sense_key == ABORTED_COMMAND) {
Stephen M. Cameron2e311fb2013-09-23 13:33:41 -05002242 cmd->result |= DID_SOFT_ERROR << 16;
Matt Gates1d3b3602010-02-04 08:43:00 -06002243 break;
2244 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002245 break;
2246 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002247 /* Problem was not a check condition
2248 * Pass it up to the upper layers...
2249 */
2250 if (ei->ScsiStatus) {
2251 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2252 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2253 "Returning result: 0x%x\n",
2254 cp, ei->ScsiStatus,
2255 sense_key, asc, ascq,
2256 cmd->result);
2257 } else { /* scsi status is zero??? How??? */
2258 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2259 "Returning no connection.\n", cp),
2260
2261 /* Ordinarily, this case should never happen,
2262 * but there is a bug in some released firmware
2263 * revisions that allows it to happen if, for
2264 * example, a 4100 backplane loses power and
2265 * the tape drive is in it. We assume that
2266 * it's a fatal error of some kind because we
2267 * can't show that it wasn't. We will make it
2268 * look like selection timeout since that is
2269 * the most common reason for this to occur,
2270 * and it's severe enough.
2271 */
2272
2273 cmd->result = DID_NO_CONNECT << 16;
2274 }
2275 break;
2276
2277 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2278 break;
2279 case CMD_DATA_OVERRUN:
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002280 dev_warn(&h->pdev->dev,
2281 "CDB %16phN data overrun\n", cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002282 break;
2283 case CMD_INVALID: {
2284 /* print_bytes(cp, sizeof(*cp), 1, 0);
2285 print_cmd(cp); */
2286 /* We get CMD_INVALID if you address a non-existent device
2287 * instead of a selection timeout (no response). You will
2288 * see this if you yank out a drive, then try to access it.
2289 * This is kind of a shame because it means that any other
2290 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2291 * missing target. */
2292 cmd->result = DID_NO_CONNECT << 16;
2293 }
2294 break;
2295 case CMD_PROTOCOL_ERR:
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05002296 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002297 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2298 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002299 break;
2300 case CMD_HARDWARE_ERR:
2301 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002302 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2303 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002304 break;
2305 case CMD_CONNECTION_LOST:
2306 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002307 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2308 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002309 break;
2310 case CMD_ABORTED:
Webb Scalesa58e7e52015-04-23 09:34:16 -05002311 /* Return now to avoid calling scsi_done(). */
2312 return hpsa_cmd_abort_and_free(h, cp, cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002313 case CMD_ABORT_FAILED:
2314 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002315 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2316 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002317 break;
2318 case CMD_UNSOLICITED_ABORT:
Stephen M. Cameronf6e76052011-07-26 11:08:52 -05002319 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002320 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2321 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002322 break;
2323 case CMD_TIMEOUT:
2324 cmd->result = DID_TIME_OUT << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002325 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2326 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002327 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002328 case CMD_UNABORTABLE:
2329 cmd->result = DID_ERROR << 16;
2330 dev_warn(&h->pdev->dev, "Command unabortable\n");
2331 break;
Stephen Cameron9437ac42015-04-23 09:32:16 -05002332 case CMD_TMF_STATUS:
2333 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2334 cmd->result = DID_ERROR << 16;
2335 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002336 case CMD_IOACCEL_DISABLED:
2337 /* This only handles the direct pass-through case since RAID
2338 * offload is handled above. Just attempt a retry.
2339 */
2340 cmd->result = DID_SOFT_ERROR << 16;
2341 dev_warn(&h->pdev->dev,
2342 "cp %p had HP SSD Smart Path error\n", cp);
2343 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002344 default:
2345 cmd->result = DID_ERROR << 16;
2346 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2347 cp, ei->CommandStatus);
2348 }
Webb Scales8a0ff922015-04-23 09:34:11 -05002349
2350 return hpsa_cmd_free_and_done(h, cp, cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002351}
2352
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002353static void hpsa_pci_unmap(struct pci_dev *pdev,
2354 struct CommandList *c, int sg_used, int data_direction)
2355{
2356 int i;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002357
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002358 for (i = 0; i < sg_used; i++)
2359 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2360 le32_to_cpu(c->SG[i].Len),
2361 data_direction);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002362}
2363
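/*
 * Map a single contiguous buffer as the command's only SG element;
 * the SG fields are cleared when there is no data to transfer or
 * the mapping fails.
 */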
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002364static int hpsa_map_one(struct pci_dev *pdev,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002365 struct CommandList *cp,
2366 unsigned char *buf,
2367 size_t buflen,
2368 int data_direction)
2369{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002370 u64 addr64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002371
2372 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2373 cp->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002374 cp->Header.SGTotal = cpu_to_le16(0);
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002375 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002376 }
2377
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002378 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
Shuah Khaneceaae12013-02-20 11:24:34 -06002379 if (dma_mapping_error(&pdev->dev, addr64)) {
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002380 /* Prevent subsequent unmap of something never mapped */
Shuah Khaneceaae12013-02-20 11:24:34 -06002381 cp->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002382 cp->Header.SGTotal = cpu_to_le16(0);
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002383 return -1;
Shuah Khaneceaae12013-02-20 11:24:34 -06002384 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002385 cp->SG[0].Addr = cpu_to_le64(addr64);
2386 cp->SG[0].Len = cpu_to_le32(buflen);
2387 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2388 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2389 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002390 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002391}
2392
Webb Scales25163bd2015-04-23 09:32:00 -05002393#define NO_TIMEOUT ((unsigned long) -1)
2394#define DEFAULT_TIMEOUT 30000 /* milliseconds */
2395static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2396 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002397{
2398 DECLARE_COMPLETION_ONSTACK(wait);
2399
2400 c->waiting = &wait;
Webb Scales25163bd2015-04-23 09:32:00 -05002401 __enqueue_cmd_and_start_io(h, c, reply_queue);
2402 if (timeout_msecs == NO_TIMEOUT) {
2403 /* TODO: get rid of this no-timeout thing */
2404 wait_for_completion_io(&wait);
2405 return IO_OK;
2406 }
2407 if (!wait_for_completion_io_timeout(&wait,
2408 msecs_to_jiffies(timeout_msecs))) {
2409 dev_warn(&h->pdev->dev, "Command timed out.\n");
2410 return -ETIMEDOUT;
2411 }
2412 return IO_OK;
2413}
2414
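/*
 * If the controller has locked up, report CMD_CTLR_LOCKUP in the
 * command's error info instead of touching the hardware; otherwise
 * issue the command and wait for it.
 */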
2415static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2416 int reply_queue, unsigned long timeout_msecs)
2417{
2418 if (unlikely(lockup_detected(h))) {
2419 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2420 return IO_OK;
2421 }
2422 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002423}
2424
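/* Read the current CPU's copy of the replicated lockup-detected flag. */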
Stephen M. Cameron094963d2014-05-29 10:53:18 -05002425static u32 lockup_detected(struct ctlr_info *h)
2426{
2427 int cpu;
2428 u32 rc, *lockup_detected;
2429
2430 cpu = get_cpu();
2431 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2432 rc = *lockup_detected;
2433 put_cpu();
2434 return rc;
2435}
2436
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002437#define MAX_DRIVER_CMD_RETRIES 25
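/*
 * Retry a command while the target reports unit attention or busy,
 * sleeping between later attempts with a delay that doubles from
 * 10 ms up to about a second.
 */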
Webb Scales25163bd2015-04-23 09:32:00 -05002438static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2439 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002440{
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002441 int backoff_time = 10, retry_count = 0;
Webb Scales25163bd2015-04-23 09:32:00 -05002442 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002443
2444 do {
Joe Perches7630abd2011-05-08 23:32:40 -07002445 memset(c->err_info, 0, sizeof(*c->err_info));
Webb Scales25163bd2015-04-23 09:32:00 -05002446 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2447 timeout_msecs);
2448 if (rc)
2449 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002450 retry_count++;
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002451 if (retry_count > 3) {
2452 msleep(backoff_time);
2453 if (backoff_time < 1000)
2454 backoff_time *= 2;
2455 }
Matt Bondurant852af202012-05-01 11:42:35 -05002456 } while ((check_for_unit_attention(h, c) ||
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002457 check_for_busy(h, c)) &&
2458 retry_count <= MAX_DRIVER_CMD_RETRIES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002459 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
Webb Scales25163bd2015-04-23 09:32:00 -05002460 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2461 rc = -EIO;
2462 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002463}
2464
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002465static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2466 struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002467{
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002468 const u8 *cdb = c->Request.CDB;
2469 const u8 *lun = c->Header.LUN.LunAddrBytes;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002470
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002471 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2472 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2473 txt, lun[0], lun[1], lun[2], lun[3],
2474 lun[4], lun[5], lun[6], lun[7],
2475 cdb[0], cdb[1], cdb[2], cdb[3],
2476 cdb[4], cdb[5], cdb[6], cdb[7],
2477 cdb[8], cdb[9], cdb[10], cdb[11],
2478 cdb[12], cdb[13], cdb[14], cdb[15]);
2479}
2480
2481static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2482 struct CommandList *cp)
2483{
2484 const struct ErrorInfo *ei = cp->err_info;
2485 struct device *d = &cp->h->pdev->dev;
Stephen Cameron9437ac42015-04-23 09:32:16 -05002486 u8 sense_key, asc, ascq;
2487 int sense_len;
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002488
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002489 switch (ei->CommandStatus) {
2490 case CMD_TARGET_STATUS:
Stephen Cameron9437ac42015-04-23 09:32:16 -05002491 if (ei->SenseLen > sizeof(ei->SenseInfo))
2492 sense_len = sizeof(ei->SenseInfo);
2493 else
2494 sense_len = ei->SenseLen;
2495 decode_sense_data(ei->SenseInfo, sense_len,
2496 &sense_key, &asc, &ascq);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002497 hpsa_print_cmd(h, "SCSI status", cp);
2498 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
Stephen Cameron9437ac42015-04-23 09:32:16 -05002499 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2500 sense_key, asc, ascq);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002501 else
Stephen Cameron9437ac42015-04-23 09:32:16 -05002502 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002503 if (ei->ScsiStatus == 0)
2504 dev_warn(d, "SCSI status is abnormally zero. "
2505 "(probably indicates selection timeout "
2506 "reported incorrectly due to a known "
2507 "firmware bug, circa July, 2001.)\n");
2508 break;
2509 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002510 break;
2511 case CMD_DATA_OVERRUN:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002512 hpsa_print_cmd(h, "overrun condition", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002513 break;
2514 case CMD_INVALID: {
2515 /* controller unfortunately reports SCSI passthrus
2516 * to non-existent targets as invalid commands.
2517 */
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002518 hpsa_print_cmd(h, "invalid command", cp);
2519 dev_warn(d, "probably means device no longer present\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002520 }
2521 break;
2522 case CMD_PROTOCOL_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002523 hpsa_print_cmd(h, "protocol error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002524 break;
2525 case CMD_HARDWARE_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002526 hpsa_print_cmd(h, "hardware error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002527 break;
2528 case CMD_CONNECTION_LOST:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002529 hpsa_print_cmd(h, "connection lost", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002530 break;
2531 case CMD_ABORTED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002532 hpsa_print_cmd(h, "aborted", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002533 break;
2534 case CMD_ABORT_FAILED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002535 hpsa_print_cmd(h, "abort failed", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002536 break;
2537 case CMD_UNSOLICITED_ABORT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002538 hpsa_print_cmd(h, "unsolicited abort", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002539 break;
2540 case CMD_TIMEOUT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002541 hpsa_print_cmd(h, "timed out", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002542 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002543 case CMD_UNABORTABLE:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002544 hpsa_print_cmd(h, "unabortable", cp);
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002545 break;
Webb Scales25163bd2015-04-23 09:32:00 -05002546 case CMD_CTLR_LOCKUP:
2547 hpsa_print_cmd(h, "controller lockup detected", cp);
2548 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002549 default:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002550 hpsa_print_cmd(h, "unknown status", cp);
2551 dev_warn(d, "Unknown command status %x\n",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002552 ei->CommandStatus);
2553 }
2554}
2555
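/*
 * Send a SCSI INQUIRY (for VPD page "page" when the VPD_PAGE bit is set)
 * to the device at scsi3addr and copy up to bufsize bytes of the response
 * into buf.  Returns 0 on success, -1 on any failure.  A minimal usage
 * sketch (caller-supplied 64-byte buffer assumed):
 *
 *	unsigned char inq[64] = { 0 };
 *
 *	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq, sizeof(inq)) == 0)
 *		dev_info(&h->pdev->dev, "type 0x%02x\n", inq[0] & 0x1f);
 */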
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

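/*
 * Read the controller parameters page via the BMIC "sense controller
 * parameters" pass-through.  Follows the same convention as
 * hpsa_scsi_do_inquiry(): 0 on success, -1 otherwise.
 */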
static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
		unsigned char *scsi3addr, unsigned char page,
		struct bmic_controller_parameters *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

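/*
 * Send a reset message to the device at scsi3addr on the given reply
 * queue.  reset_type overrides CDB byte 1 (fill_cmd defaults to a LUN
 * reset); fill_cmd itself cannot fail here since no data buffer is mapped.
 */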
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

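/*
 * Decide whether an outstanding command is addressed to the given device.
 * The matching rule depends on how the command was submitted: SCSI and
 * ioctl commands compare the 8-byte LUN address, while ioaccel commands
 * are matched via the physical-disk pointer or the ioaccel I/T nexus.
 */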
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *              == c2->scsi_nexus; */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:		/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}

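/*
 * Reset a device and wait for all commands outstanding against it to
 * complete.  In-flight commands are tagged via c->reset_pending and
 * counted in dev->reset_cmds_out; after the reset is sent we sleep until
 * that count drains to zero or a controller lockup is detected.
 */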
static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
{
	int i;
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
			unsigned long flags;

			/*
			 * Mark the target command as having a reset pending,
			 * then take the lock so that the command cannot
			 * complete while we're considering it.  If the
			 * command is not idle then count it; otherwise
			 * revoke the event.
			 */
			c->reset_pending = dev;
			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				atomic_inc(&dev->reset_cmds_out);
			else
				c->reset_pending = NULL;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
	if (!rc)
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->reset_cmds_out) == 0 ||
			lockup_detected(h));

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (unlikely(rc))
		atomic_set(&dev->reset_cmds_out, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}

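/*
 * Read the RAID level of a logical volume from vendor-specific VPD page
 * 0xC1 (byte 8 of the response); anything unrecognized or out of range
 * is reported as RAID_UNKNOWN.
 */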
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif

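/*
 * Fetch the RAID map for a logical volume into this_device->raid_map and
 * dump it when map debugging is active.  The map is rejected (rc = -1)
 * if the firmware reports a structure larger than the driver's statically
 * sized buffer.
 */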
static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}

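/*
 * BMIC "identify physical device" for the drive selected by
 * bmic_device_index; the 16-bit index is split across CDB bytes 2
 * (low byte) and 9 (high byte).
 */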
static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
	unsigned char scsi3addr[], u16 bmic_device_index,
	struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

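/*
 * Check whether the device advertises a given VPD page: fetch VPD page
 * 0x00 (supported pages) twice -- once to size the list, then in full --
 * and return 1 if "page" appears in the list, 0 otherwise.
 */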
static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}

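/*
 * Read the ioaccel status VPD page for a volume and record whether I/O
 * accelerator offload is configured and enabled.  Enabling additionally
 * requires a usable RAID map, so a failed map fetch turns offload off.
 */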
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
	this_device->offload_to_be_enabled = this_device->offload_enabled;
out:
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				rld->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
						HPSA_REPORT_PHYS_EXTENDED);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static int hpsa_volume_offline(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return 0;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);
	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY) {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Find out if a logical device supports aborts by simply trying one.
 * Smart Array may claim not to support aborts on logical drives, but
 * if an MSA2000 is connected, the drives on that will be presented
 * by the Smart Array as logical drives, and aborts may be sent to
 * those devices successfully.  So the simplest way to find out is
 * to simply try an abort and see how the device responds.
 */
static int hpsa_device_supports_aborts(struct ctlr_info *h,
	unsigned char *scsi3addr)
{
	struct CommandList *c;
	struct ErrorInfo *ei;
	int rc = 0;

	u64 tag = (u64) -1; /* bogus tag */

	/* Assume that physical devices support aborts */
	if (!is_logical_dev_addr_mode(scsi3addr))
		return 1;

	c = cmd_alloc(h);

	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	/* no unmap needed here because no data xfer. */
	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_INVALID:
		rc = 0;
		break;
	case CMD_UNABORTABLE:
	case CMD_ABORT_FAILED:
		rc = 1;
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	default:
		rc = 0;
		break;
	}
	cmd_free(h, c);
	return rc;
}

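/*
 * Interrogate the device at scsi3addr and fill in *this_device: type,
 * vendor, model, device id, and (for logical volumes) RAID level,
 * offload state, and volume-offline status.  Optionally reports whether
 * the OBDR tape signature is present.  Returns 0 on success, 1 on failure.
 */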
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr)) {
		int volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline < 0 || volume_offline > 0xff)
			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
		this_device->volume_offline = volume_offline & 0xff;
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->offload_to_be_enabled = 0;
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
			struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
{
	unsigned long flags;
	int rc, entry;
	/*
	 * See if this device supports aborts.  If we already know
	 * the device, we already know if it supports aborts, otherwise
	 * we have to find out if it supports aborts by trying one.
	 */
	spin_lock_irqsave(&h->devlock, flags);
	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
		entry >= 0 && entry < h->ndevices) {
		dev->supports_aborts = h->dev[entry]->supports_aborts;
		spin_unlock_irqrestore(&h->devlock, flags);
	} else {
		spin_unlock_irqrestore(&h->devlock, flags);
		dev->supports_aborts =
				hpsa_device_supports_aborts(h, scsi3addr);
		if (dev->supports_aborts < 0)
			dev->supports_aborts = 0;
	}
}

static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	"MSA 2040 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* External target: put logicals on bus 1 and match the
		 * target/lun numbers the box reports; for any other smart
		 * array, use bus 0, target 0, and match the lunid.
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}

/*
 * Get address of physical disk used for an ioaccel2 mode command:
 * 1. Extract ioaccel2 handle from the command.
 * 2. Find a matching ioaccel2 handle from list of physical disks.
 * 3. Return:
 *	1 and set scsi3addr to address of matching physical disk, or
 *	0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct io_accel2_cmd *c2 =
			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	unsigned long flags;
	int i;

	spin_lock_irqsave(&h->devlock, flags);
	for (i = 0; i < h->ndevices; i++)
		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
			memcpy(scsi3addr, h->dev[i]->scsi3addr,
				sizeof(h->dev[i]->scsi3addr));
			spin_unlock_irqrestore(&h->devlock, flags);
			return 1;
		}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
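	/*
	 * Each extended-mode physical LUN entry is 24 bytes, so the byte
	 * count reported by the firmware converts to an entry count by
	 * dividing by 24; the logical list below uses plain 8-byte
	 * entries, hence the division by 8.
	 */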
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded.  %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

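/*
 * Read the controller parameters and report whether HBA mode is turned
 * on in NVRAM.  Returns 1 or 0 accordingly, or a negative value if the
 * sense command itself fails.
 */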
static int hpsa_hba_mode_enabled(struct ctlr_info *h)
{
	int rc;
	int hba_mode_enabled;
	struct bmic_controller_parameters *ctlr_params;

	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
		GFP_KERNEL);
	if (!ctlr_params)
		return -ENOMEM;
	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
		sizeof(struct bmic_controller_parameters));
	if (rc) {
		kfree(ctlr_params);
		return rc;
	}

	hba_mode_enabled =
		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
	kfree(ctlr_params);
	return hba_mode_enabled;
}

/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		u8 *lunaddrbytes,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle =
		(struct ext_report_lun_entry *) lunaddrbytes;

	dev->ioaccel_handle = rle->ioaccel_handle;
	if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
			GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
	atomic_set(&dev->ioaccel_cmds_out, 0);
	atomic_set(&dev->reset_cmds_out, 0);
}

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	int rescan_hba_mode;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	rescan_hba_mode = hpsa_hba_mode_enabled(h);
	if (rescan_hba_mode < 0)
		goto out;

	if (!h->hba_mode_enabled && rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
	else if (h->hba_mode_enabled && !rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode disabled\n");

	h->hba_mode_enabled = rescan_hba_mode;

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* skip masked non-disk devices */
		if (MASKED_DEVICE(lunaddrbytes))
			if (i < nphysicals + (raid_ctlr_position == 0) &&
				NON_DISK_PHYS_DEV(lunaddrbytes))
				continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;

		/* do not expose masked devices */
		if (MASKED_DEVICE(lunaddrbytes) &&
			i < nphysicals + (raid_ctlr_position == 0)) {
			if (h->hba_mode_enabled)
				dev_warn(&h->pdev->dev,
					"Masked physical device detected\n");
			this_device->expose_state = HPSA_DO_NOT_EXPOSE;
		} else {
			this_device->expose_state =
					HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
		}

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (i >= nphysicals) {
				ncurrent++;
				break;
			}

			if (h->hba_mode_enabled)
				/* never use raid mapper in HBA mode */
				this_device->offload_enabled = 0;
			else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
				h->transMethod & CFGTBL_Trans_io_accel2))
				break;

			hpsa_get_ioaccel_drive_info(h, this_device,
						lunaddrbytes, id_phys);
			atomic_set(&this_device->ioaccel_cmds_out, 0);
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (h->hba_mode_enabled)
				ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);
}

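/* Fill in one hardware SG descriptor from a DMA-mapped scatterlist entry. */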
static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
	struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}

Webb Scalesc7ee65b2015-01-23 16:42:17 -06003823/*
 3824	 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), performs the PCI
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003825	 * DMA mapping, and fills in the scatter-gather entries of the
 3826	 * hpsa command, cp.
3827 */
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003828static int hpsa_scatter_gather(struct ctlr_info *h,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003829 struct CommandList *cp,
3830 struct scsi_cmnd *cmd)
3831{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003832 struct scatterlist *sg;
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003833 int use_sg, i, sg_limit, chained, last_sg;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003834 struct SGDescriptor *curr_sg;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003835
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003836 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003837
3838 use_sg = scsi_dma_map(cmd);
3839 if (use_sg < 0)
3840 return use_sg;
3841
3842 if (!use_sg)
3843 goto sglist_finished;
3844
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003845 /*
3846 * If the number of entries is greater than the max for a single list,
3847 * then we have a chained list; we will set up all but one entry in the
3848 * first list (the last entry is saved for link information);
 3849	 * otherwise, we don't have a chained list and we'll set up each of
 3850	 * the entries in the one list.
3851 */
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003852 curr_sg = cp->SG;
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003853 chained = use_sg > h->max_cmd_sg_entries;
3854 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
3855 last_sg = scsi_sg_count(cmd) - 1;
3856 scsi_for_each_sg(cmd, sg, sg_limit, i) {
Webb Scalesec5cbf02015-01-23 16:44:45 -06003857 hpsa_set_sg_descriptor(curr_sg, sg);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003858 curr_sg++;
3859 }
Webb Scalesec5cbf02015-01-23 16:44:45 -06003860
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003861 if (chained) {
3862 /*
3863 * Continue with the chained list. Set curr_sg to the chained
3864 * list. Modify the limit to the total count less the entries
3865 * we've already set up. Resume the scan at the list entry
3866 * where the previous loop left off.
3867 */
3868 curr_sg = h->cmd_sg_list[cp->cmdindex];
3869 sg_limit = use_sg - sg_limit;
3870 for_each_sg(sg, sg, sg_limit, i) {
3871 hpsa_set_sg_descriptor(curr_sg, sg);
3872 curr_sg++;
3873 }
3874 }
3875
Webb Scalesec5cbf02015-01-23 16:44:45 -06003876 /* Back the pointer up to the last entry and mark it as "last". */
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003877 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003878
3879 if (use_sg + chained > h->maxSG)
3880 h->maxSG = use_sg + chained;
3881
3882 if (chained) {
3883 cp->Header.SGList = h->max_cmd_sg_entries;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003884 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06003885 if (hpsa_map_sg_chain_block(h, cp)) {
3886 scsi_dma_unmap(cmd);
3887 return -1;
3888 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003889 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003890 }
3891
3892sglist_finished:
3893
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003894 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
Webb Scalesc7ee65b2015-01-23 16:42:17 -06003895 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003896 return 0;
3897}
3898
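/*
 * Worked example for the chaining logic above (illustrative values only,
 * assuming h->max_cmd_sg_entries == 32): a request that maps to use_sg == 40
 * entries is chained. sg_limit becomes 31, so the first list holds 31 data
 * descriptors and its last slot carries the link to the chain block (set up
 * by hpsa_map_sg_chain_block); the remaining 9 descriptors land in
 * h->cmd_sg_list[cp->cmdindex]. The header then reports SGList = 32 (the
 * entries embedded in the command) and SGTotal = 41 (40 data entries plus
 * the chain descriptor).
 */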
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003899#define IO_ACCEL_INELIGIBLE (1)
3900static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3901{
3902 int is_write = 0;
3903 u32 block;
3904 u32 block_cnt;
3905
3906 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3907 switch (cdb[0]) {
3908 case WRITE_6:
3909 case WRITE_12:
3910 is_write = 1;
3911 case READ_6:
3912 case READ_12:
3913 if (*cdb_len == 6) {
3914 block = (((u32) cdb[2]) << 8) | cdb[3];
3915 block_cnt = cdb[4];
3916 } else {
3917 BUG_ON(*cdb_len != 12);
3918 block = (((u32) cdb[2]) << 24) |
3919 (((u32) cdb[3]) << 16) |
3920 (((u32) cdb[4]) << 8) |
3921 cdb[5];
3922 block_cnt =
3923 (((u32) cdb[6]) << 24) |
3924 (((u32) cdb[7]) << 16) |
3925 (((u32) cdb[8]) << 8) |
3926 cdb[9];
3927 }
3928 if (block_cnt > 0xffff)
3929 return IO_ACCEL_INELIGIBLE;
3930
3931 cdb[0] = is_write ? WRITE_10 : READ_10;
3932 cdb[1] = 0;
3933 cdb[2] = (u8) (block >> 24);
3934 cdb[3] = (u8) (block >> 16);
3935 cdb[4] = (u8) (block >> 8);
3936 cdb[5] = (u8) (block);
3937 cdb[6] = 0;
3938 cdb[7] = (u8) (block_cnt >> 8);
3939 cdb[8] = (u8) (block_cnt);
3940 cdb[9] = 0;
3941 *cdb_len = 10;
3942 break;
3943 }
3944 return 0;
3945}
3946
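/*
 * Worked example for fixup_ioaccel_cdb() (illustrative bytes only): a
 * READ_6 CDB of 08 00 12 34 08 00 decodes here as block 0x1234 and
 * block_cnt 8, and is rewritten as the 10-byte equivalent
 * 28 00 00 00 12 34 00 00 08 00, with *cdb_len updated to 10. A request
 * whose block count exceeds 0xffff cannot be encoded in a 10-byte CDB and
 * is bounced back with IO_ACCEL_INELIGIBLE.
 */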
Scott Teelc3497752014-02-18 13:56:34 -06003947static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003948 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
Don Brace03383732015-01-23 16:43:30 -06003949 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
Matt Gatese1f7de02014-02-18 13:55:17 -06003950{
3951 struct scsi_cmnd *cmd = c->scsi_cmd;
Matt Gatese1f7de02014-02-18 13:55:17 -06003952 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3953 unsigned int len;
3954 unsigned int total_len = 0;
3955 struct scatterlist *sg;
3956 u64 addr64;
3957 int use_sg, i;
3958 struct SGDescriptor *curr_sg;
3959 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3960
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003961 /* TODO: implement chaining support */
Don Brace03383732015-01-23 16:43:30 -06003962 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3963 atomic_dec(&phys_disk->ioaccel_cmds_out);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003964 return IO_ACCEL_INELIGIBLE;
Don Brace03383732015-01-23 16:43:30 -06003965 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003966
Matt Gatese1f7de02014-02-18 13:55:17 -06003967 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3968
Don Brace03383732015-01-23 16:43:30 -06003969 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3970 atomic_dec(&phys_disk->ioaccel_cmds_out);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003971 return IO_ACCEL_INELIGIBLE;
Don Brace03383732015-01-23 16:43:30 -06003972 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003973
Matt Gatese1f7de02014-02-18 13:55:17 -06003974 c->cmd_type = CMD_IOACCEL1;
3975
3976 /* Adjust the DMA address to point to the accelerated command buffer */
3977 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3978 (c->cmdindex * sizeof(*cp));
3979 BUG_ON(c->busaddr & 0x0000007F);
3980
3981 use_sg = scsi_dma_map(cmd);
Don Brace03383732015-01-23 16:43:30 -06003982 if (use_sg < 0) {
3983 atomic_dec(&phys_disk->ioaccel_cmds_out);
Matt Gatese1f7de02014-02-18 13:55:17 -06003984 return use_sg;
Don Brace03383732015-01-23 16:43:30 -06003985 }
Matt Gatese1f7de02014-02-18 13:55:17 -06003986
3987 if (use_sg) {
3988 curr_sg = cp->SG;
3989 scsi_for_each_sg(cmd, sg, use_sg, i) {
3990 addr64 = (u64) sg_dma_address(sg);
3991 len = sg_dma_len(sg);
3992 total_len += len;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003993 curr_sg->Addr = cpu_to_le64(addr64);
3994 curr_sg->Len = cpu_to_le32(len);
3995 curr_sg->Ext = cpu_to_le32(0);
Matt Gatese1f7de02014-02-18 13:55:17 -06003996 curr_sg++;
3997 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003998 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
Matt Gatese1f7de02014-02-18 13:55:17 -06003999
4000 switch (cmd->sc_data_direction) {
4001 case DMA_TO_DEVICE:
4002 control |= IOACCEL1_CONTROL_DATA_OUT;
4003 break;
4004 case DMA_FROM_DEVICE:
4005 control |= IOACCEL1_CONTROL_DATA_IN;
4006 break;
4007 case DMA_NONE:
4008 control |= IOACCEL1_CONTROL_NODATAXFER;
4009 break;
4010 default:
4011 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4012 cmd->sc_data_direction);
4013 BUG();
4014 break;
4015 }
4016 } else {
4017 control |= IOACCEL1_CONTROL_NODATAXFER;
4018 }
4019
Scott Teelc3497752014-02-18 13:56:34 -06004020 c->Header.SGList = use_sg;
Matt Gatese1f7de02014-02-18 13:55:17 -06004021 /* Fill out the command structure to submit */
Don Brace2b08b3e2015-01-23 16:41:09 -06004022 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4023 cp->transfer_len = cpu_to_le32(total_len);
4024 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4025 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4026 cp->control = cpu_to_le32(control);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004027 memcpy(cp->CDB, cdb, cdb_len);
4028 memcpy(cp->CISS_LUN, scsi3addr, 8);
Scott Teelc3497752014-02-18 13:56:34 -06004029 /* Tag was already set at init time. */
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004030 enqueue_cmd_and_start_io(h, c);
Matt Gatese1f7de02014-02-18 13:55:17 -06004031 return 0;
4032}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004033
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004034/*
4035 * Queue a command directly to a device behind the controller using the
4036 * I/O accelerator path.
4037 */
4038static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4039 struct CommandList *c)
4040{
4041 struct scsi_cmnd *cmd = c->scsi_cmd;
4042 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4043
Don Brace03383732015-01-23 16:43:30 -06004044 c->phys_disk = dev;
4045
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004046 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
Don Brace03383732015-01-23 16:43:30 -06004047 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004048}
4049
Scott Teeldd0e19f2014-02-18 13:57:31 -06004050/*
4051 * Set encryption parameters for the ioaccel2 request
4052 */
4053static void set_encrypt_ioaccel2(struct ctlr_info *h,
4054 struct CommandList *c, struct io_accel2_cmd *cp)
4055{
4056 struct scsi_cmnd *cmd = c->scsi_cmd;
4057 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4058 struct raid_map_data *map = &dev->raid_map;
4059 u64 first_block;
4060
Scott Teeldd0e19f2014-02-18 13:57:31 -06004061	/* Are we doing encryption on this device? */
Don Brace2b08b3e2015-01-23 16:41:09 -06004062 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
Scott Teeldd0e19f2014-02-18 13:57:31 -06004063 return;
4064 /* Set the data encryption key index. */
4065 cp->dekindex = map->dekindex;
4066
4067 /* Set the encryption enable flag, encoded into direction field. */
4068 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4069
 4070	/* Set encryption tweak values based on the logical block address.
 4071	 * If the block size is 512, the tweak value is the LBA.
 4072	 * For other block sizes, the tweak is (LBA * block size) / 512.
4073 */
4074 switch (cmd->cmnd[0]) {
4075 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4076 case WRITE_6:
4077 case READ_6:
Don Brace2b08b3e2015-01-23 16:41:09 -06004078 first_block = get_unaligned_be16(&cmd->cmnd[2]);
Scott Teeldd0e19f2014-02-18 13:57:31 -06004079 break;
4080 case WRITE_10:
4081 case READ_10:
Scott Teeldd0e19f2014-02-18 13:57:31 -06004082 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4083 case WRITE_12:
4084 case READ_12:
Don Brace2b08b3e2015-01-23 16:41:09 -06004085 first_block = get_unaligned_be32(&cmd->cmnd[2]);
Scott Teeldd0e19f2014-02-18 13:57:31 -06004086 break;
4087 case WRITE_16:
4088 case READ_16:
Don Brace2b08b3e2015-01-23 16:41:09 -06004089 first_block = get_unaligned_be64(&cmd->cmnd[2]);
Scott Teeldd0e19f2014-02-18 13:57:31 -06004090 break;
4091 default:
4092 dev_err(&h->pdev->dev,
Don Brace2b08b3e2015-01-23 16:41:09 -06004093			"ERROR: %s: opcode (0x%x) not supported for encryption\n",
4094 __func__, cmd->cmnd[0]);
Scott Teeldd0e19f2014-02-18 13:57:31 -06004095 BUG();
4096 break;
4097 }
Don Brace2b08b3e2015-01-23 16:41:09 -06004098
4099 if (le32_to_cpu(map->volume_blk_size) != 512)
4100 first_block = first_block *
4101 le32_to_cpu(map->volume_blk_size)/512;
4102
4103 cp->tweak_lower = cpu_to_le32(first_block);
4104 cp->tweak_upper = cpu_to_le32(first_block >> 32);
Scott Teeldd0e19f2014-02-18 13:57:31 -06004105}
4106
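/*
 * Worked tweak example for set_encrypt_ioaccel2() (illustrative values
 * only): with volume_blk_size == 4096 and a READ_10 starting at LBA 100,
 * first_block becomes 100 * 4096 / 512 == 800, so tweak_lower is set to
 * 800 and tweak_upper to 0. With the common 512-byte block size the tweak
 * is simply the LBA itself.
 */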
Scott Teelc3497752014-02-18 13:56:34 -06004107static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4108 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
Don Brace03383732015-01-23 16:43:30 -06004109 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
Scott Teelc3497752014-02-18 13:56:34 -06004110{
4111 struct scsi_cmnd *cmd = c->scsi_cmd;
4112 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4113 struct ioaccel2_sg_element *curr_sg;
4114 int use_sg, i;
4115 struct scatterlist *sg;
4116 u64 addr64;
4117 u32 len;
4118 u32 total_len = 0;
4119
Webb Scalesd9a729f2015-04-23 09:33:27 -05004120 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
Scott Teelc3497752014-02-18 13:56:34 -06004121
Don Brace03383732015-01-23 16:43:30 -06004122 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4123 atomic_dec(&phys_disk->ioaccel_cmds_out);
Scott Teelc3497752014-02-18 13:56:34 -06004124 return IO_ACCEL_INELIGIBLE;
Don Brace03383732015-01-23 16:43:30 -06004125 }
4126
Scott Teelc3497752014-02-18 13:56:34 -06004127 c->cmd_type = CMD_IOACCEL2;
4128 /* Adjust the DMA address to point to the accelerated command buffer */
4129 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4130 (c->cmdindex * sizeof(*cp));
4131 BUG_ON(c->busaddr & 0x0000007F);
4132
4133 memset(cp, 0, sizeof(*cp));
4134 cp->IU_type = IOACCEL2_IU_TYPE;
4135
4136 use_sg = scsi_dma_map(cmd);
Don Brace03383732015-01-23 16:43:30 -06004137 if (use_sg < 0) {
4138 atomic_dec(&phys_disk->ioaccel_cmds_out);
Scott Teelc3497752014-02-18 13:56:34 -06004139 return use_sg;
Don Brace03383732015-01-23 16:43:30 -06004140 }
Scott Teelc3497752014-02-18 13:56:34 -06004141
4142 if (use_sg) {
Scott Teelc3497752014-02-18 13:56:34 -06004143 curr_sg = cp->sg;
Webb Scalesd9a729f2015-04-23 09:33:27 -05004144 if (use_sg > h->ioaccel_maxsg) {
4145 addr64 = le64_to_cpu(
4146 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4147 curr_sg->address = cpu_to_le64(addr64);
4148 curr_sg->length = 0;
4149 curr_sg->reserved[0] = 0;
4150 curr_sg->reserved[1] = 0;
4151 curr_sg->reserved[2] = 0;
4152 curr_sg->chain_indicator = 0x80;
4153
4154 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4155 }
Scott Teelc3497752014-02-18 13:56:34 -06004156 scsi_for_each_sg(cmd, sg, use_sg, i) {
4157 addr64 = (u64) sg_dma_address(sg);
4158 len = sg_dma_len(sg);
4159 total_len += len;
4160 curr_sg->address = cpu_to_le64(addr64);
4161 curr_sg->length = cpu_to_le32(len);
4162 curr_sg->reserved[0] = 0;
4163 curr_sg->reserved[1] = 0;
4164 curr_sg->reserved[2] = 0;
4165 curr_sg->chain_indicator = 0;
4166 curr_sg++;
4167 }
4168
4169 switch (cmd->sc_data_direction) {
4170 case DMA_TO_DEVICE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06004171 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4172 cp->direction |= IOACCEL2_DIR_DATA_OUT;
Scott Teelc3497752014-02-18 13:56:34 -06004173 break;
4174 case DMA_FROM_DEVICE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06004175 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4176 cp->direction |= IOACCEL2_DIR_DATA_IN;
Scott Teelc3497752014-02-18 13:56:34 -06004177 break;
4178 case DMA_NONE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06004179 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4180 cp->direction |= IOACCEL2_DIR_NO_DATA;
Scott Teelc3497752014-02-18 13:56:34 -06004181 break;
4182 default:
4183 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4184 cmd->sc_data_direction);
4185 BUG();
4186 break;
4187 }
4188 } else {
Scott Teeldd0e19f2014-02-18 13:57:31 -06004189 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4190 cp->direction |= IOACCEL2_DIR_NO_DATA;
Scott Teelc3497752014-02-18 13:56:34 -06004191 }
Scott Teeldd0e19f2014-02-18 13:57:31 -06004192
4193 /* Set encryption parameters, if necessary */
4194 set_encrypt_ioaccel2(h, c, cp);
4195
Don Brace2b08b3e2015-01-23 16:41:09 -06004196 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
Don Bracef2405db2015-01-23 16:43:09 -06004197 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
Scott Teelc3497752014-02-18 13:56:34 -06004198 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
Scott Teelc3497752014-02-18 13:56:34 -06004199
Scott Teelc3497752014-02-18 13:56:34 -06004200 cp->data_len = cpu_to_le32(total_len);
4201 cp->err_ptr = cpu_to_le64(c->busaddr +
4202 offsetof(struct io_accel2_cmd, error_data));
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004203 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
Scott Teelc3497752014-02-18 13:56:34 -06004204
Webb Scalesd9a729f2015-04-23 09:33:27 -05004205 /* fill in sg elements */
4206 if (use_sg > h->ioaccel_maxsg) {
4207 cp->sg_count = 1;
4208 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4209 atomic_dec(&phys_disk->ioaccel_cmds_out);
4210 scsi_dma_unmap(cmd);
4211 return -1;
4212 }
4213 } else
4214 cp->sg_count = (u8) use_sg;
4215
Scott Teelc3497752014-02-18 13:56:34 -06004216 enqueue_cmd_and_start_io(h, c);
4217 return 0;
4218}
4219
4220/*
4221 * Queue a command to the correct I/O accelerator path.
4222 */
4223static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4224 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
Don Brace03383732015-01-23 16:43:30 -06004225 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
Scott Teelc3497752014-02-18 13:56:34 -06004226{
Don Brace03383732015-01-23 16:43:30 -06004227 /* Try to honor the device's queue depth */
4228 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4229 phys_disk->queue_depth) {
4230 atomic_dec(&phys_disk->ioaccel_cmds_out);
4231 return IO_ACCEL_INELIGIBLE;
4232 }
Scott Teelc3497752014-02-18 13:56:34 -06004233 if (h->transMethod & CFGTBL_Trans_io_accel1)
4234 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
Don Brace03383732015-01-23 16:43:30 -06004235 cdb, cdb_len, scsi3addr,
4236 phys_disk);
Scott Teelc3497752014-02-18 13:56:34 -06004237 else
4238 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
Don Brace03383732015-01-23 16:43:30 -06004239 cdb, cdb_len, scsi3addr,
4240 phys_disk);
Scott Teelc3497752014-02-18 13:56:34 -06004241}
4242
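/*
 * Illustration of the queue-depth gate above (assumed numbers): if
 * phys_disk->queue_depth is 28 and 28 ioaccel commands are already
 * outstanding, atomic_inc_return() yields 29 > 28, so the count is
 * decremented again and IO_ACCEL_INELIGIBLE is returned; the caller then
 * falls back to the normal RAID path rather than overrunning the drive.
 */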
Scott Teel6b80b182014-02-18 13:56:55 -06004243static void raid_map_helper(struct raid_map_data *map,
4244 int offload_to_mirror, u32 *map_index, u32 *current_group)
4245{
4246 if (offload_to_mirror == 0) {
4247 /* use physical disk in the first mirrored group. */
Don Brace2b08b3e2015-01-23 16:41:09 -06004248 *map_index %= le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004249 return;
4250 }
4251 do {
4252 /* determine mirror group that *map_index indicates */
Don Brace2b08b3e2015-01-23 16:41:09 -06004253 *current_group = *map_index /
4254 le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004255 if (offload_to_mirror == *current_group)
4256 continue;
Don Brace2b08b3e2015-01-23 16:41:09 -06004257 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
Scott Teel6b80b182014-02-18 13:56:55 -06004258 /* select map index from next group */
Don Brace2b08b3e2015-01-23 16:41:09 -06004259 *map_index += le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004260 (*current_group)++;
4261 } else {
4262 /* select map index from first group */
Don Brace2b08b3e2015-01-23 16:41:09 -06004263 *map_index %= le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004264 *current_group = 0;
4265 }
4266 } while (offload_to_mirror != *current_group);
4267}
4268
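/*
 * Worked example for raid_map_helper() (illustrative values only): with
 * data_disks_per_row == 4, layout_map_count == 3 (a 3-way mirror),
 * *map_index == 5 and offload_to_mirror == 2, the first pass computes
 * current_group = 5 / 4 = 1; since 1 < 2, the index advances by one row
 * to 9 and the group to 2, which matches offload_to_mirror, so the loop
 * exits with *map_index == 9.
 */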
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004269/*
4270 * Attempt to perform offload RAID mapping for a logical volume I/O.
4271 */
4272static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4273 struct CommandList *c)
4274{
4275 struct scsi_cmnd *cmd = c->scsi_cmd;
4276 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4277 struct raid_map_data *map = &dev->raid_map;
4278 struct raid_map_disk_data *dd = &map->data[0];
4279 int is_write = 0;
4280 u32 map_index;
4281 u64 first_block, last_block;
4282 u32 block_cnt;
4283 u32 blocks_per_row;
4284 u64 first_row, last_row;
4285 u32 first_row_offset, last_row_offset;
4286 u32 first_column, last_column;
Scott Teel6b80b182014-02-18 13:56:55 -06004287 u64 r0_first_row, r0_last_row;
4288 u32 r5or6_blocks_per_row;
4289 u64 r5or6_first_row, r5or6_last_row;
4290 u32 r5or6_first_row_offset, r5or6_last_row_offset;
4291 u32 r5or6_first_column, r5or6_last_column;
4292 u32 total_disks_per_row;
4293 u32 stripesize;
4294 u32 first_group, last_group, current_group;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004295 u32 map_row;
4296 u32 disk_handle;
4297 u64 disk_block;
4298 u32 disk_block_cnt;
4299 u8 cdb[16];
4300 u8 cdb_len;
Don Brace2b08b3e2015-01-23 16:41:09 -06004301 u16 strip_size;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004302#if BITS_PER_LONG == 32
4303 u64 tmpdiv;
4304#endif
Scott Teel6b80b182014-02-18 13:56:55 -06004305 int offload_to_mirror;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004306
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004307 /* check for valid opcode, get LBA and block count */
4308 switch (cmd->cmnd[0]) {
4309 case WRITE_6:
4310 is_write = 1;
4311 case READ_6:
4312 first_block =
4313 (((u64) cmd->cmnd[2]) << 8) |
4314 cmd->cmnd[3];
4315 block_cnt = cmd->cmnd[4];
Stephen M. Cameron3fa89a02014-07-03 10:18:14 -05004316 if (block_cnt == 0)
4317 block_cnt = 256;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004318 break;
4319 case WRITE_10:
4320 is_write = 1;
4321 case READ_10:
4322 first_block =
4323 (((u64) cmd->cmnd[2]) << 24) |
4324 (((u64) cmd->cmnd[3]) << 16) |
4325 (((u64) cmd->cmnd[4]) << 8) |
4326 cmd->cmnd[5];
4327 block_cnt =
4328 (((u32) cmd->cmnd[7]) << 8) |
4329 cmd->cmnd[8];
4330 break;
4331 case WRITE_12:
4332 is_write = 1;
4333 case READ_12:
4334 first_block =
4335 (((u64) cmd->cmnd[2]) << 24) |
4336 (((u64) cmd->cmnd[3]) << 16) |
4337 (((u64) cmd->cmnd[4]) << 8) |
4338 cmd->cmnd[5];
4339 block_cnt =
4340 (((u32) cmd->cmnd[6]) << 24) |
4341 (((u32) cmd->cmnd[7]) << 16) |
4342 (((u32) cmd->cmnd[8]) << 8) |
4343 cmd->cmnd[9];
4344 break;
4345 case WRITE_16:
4346 is_write = 1;
4347 case READ_16:
4348 first_block =
4349 (((u64) cmd->cmnd[2]) << 56) |
4350 (((u64) cmd->cmnd[3]) << 48) |
4351 (((u64) cmd->cmnd[4]) << 40) |
4352 (((u64) cmd->cmnd[5]) << 32) |
4353 (((u64) cmd->cmnd[6]) << 24) |
4354 (((u64) cmd->cmnd[7]) << 16) |
4355 (((u64) cmd->cmnd[8]) << 8) |
4356 cmd->cmnd[9];
4357 block_cnt =
4358 (((u32) cmd->cmnd[10]) << 24) |
4359 (((u32) cmd->cmnd[11]) << 16) |
4360 (((u32) cmd->cmnd[12]) << 8) |
4361 cmd->cmnd[13];
4362 break;
4363 default:
4364 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4365 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004366 last_block = first_block + block_cnt - 1;
4367
4368 /* check for write to non-RAID-0 */
4369 if (is_write && dev->raid_level != 0)
4370 return IO_ACCEL_INELIGIBLE;
4371
4372 /* check for invalid block or wraparound */
Don Brace2b08b3e2015-01-23 16:41:09 -06004373 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4374 last_block < first_block)
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004375 return IO_ACCEL_INELIGIBLE;
4376
4377 /* calculate stripe information for the request */
Don Brace2b08b3e2015-01-23 16:41:09 -06004378 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4379 le16_to_cpu(map->strip_size);
4380 strip_size = le16_to_cpu(map->strip_size);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004381#if BITS_PER_LONG == 32
4382 tmpdiv = first_block;
4383 (void) do_div(tmpdiv, blocks_per_row);
4384 first_row = tmpdiv;
4385 tmpdiv = last_block;
4386 (void) do_div(tmpdiv, blocks_per_row);
4387 last_row = tmpdiv;
4388 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4389 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4390 tmpdiv = first_row_offset;
Don Brace2b08b3e2015-01-23 16:41:09 -06004391 (void) do_div(tmpdiv, strip_size);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004392 first_column = tmpdiv;
4393 tmpdiv = last_row_offset;
Don Brace2b08b3e2015-01-23 16:41:09 -06004394 (void) do_div(tmpdiv, strip_size);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004395 last_column = tmpdiv;
4396#else
4397 first_row = first_block / blocks_per_row;
4398 last_row = last_block / blocks_per_row;
4399 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4400 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
Don Brace2b08b3e2015-01-23 16:41:09 -06004401 first_column = first_row_offset / strip_size;
4402 last_column = last_row_offset / strip_size;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004403#endif
4404
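	/*
	 * Worked example of the stripe math above (illustrative values only):
	 * with strip_size == 128 and data_disks_per_row == 4, blocks_per_row
	 * is 512. A request for first_block == 1000, last_block == 1007 gives
	 * first_row = last_row = 1000 / 512 = 1, first_row_offset = 488,
	 * last_row_offset = 495, and first_column = last_column = 488 / 128
	 * = 3, so the request stays within one row/column and remains
	 * eligible.
	 */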
4405 /* if this isn't a single row/column then give to the controller */
4406 if ((first_row != last_row) || (first_column != last_column))
4407 return IO_ACCEL_INELIGIBLE;
4408
4409 /* proceeding with driver mapping */
Don Brace2b08b3e2015-01-23 16:41:09 -06004410 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4411 le16_to_cpu(map->metadata_disks_per_row);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004412 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
Don Brace2b08b3e2015-01-23 16:41:09 -06004413 le16_to_cpu(map->row_cnt);
Scott Teel6b80b182014-02-18 13:56:55 -06004414 map_index = (map_row * total_disks_per_row) + first_column;
4415
4416 switch (dev->raid_level) {
4417 case HPSA_RAID_0:
4418 break; /* nothing special to do */
4419 case HPSA_RAID_1:
4420 /* Handles load balance across RAID 1 members.
4421 * (2-drive R1 and R10 with even # of drives.)
 4422		 * Appropriate for SSDs, not optimal for HDDs.
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004423 */
Don Brace2b08b3e2015-01-23 16:41:09 -06004424 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004425 if (dev->offload_to_mirror)
Don Brace2b08b3e2015-01-23 16:41:09 -06004426 map_index += le16_to_cpu(map->data_disks_per_row);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004427 dev->offload_to_mirror = !dev->offload_to_mirror;
Scott Teel6b80b182014-02-18 13:56:55 -06004428 break;
4429 case HPSA_RAID_ADM:
4430 /* Handles N-way mirrors (R1-ADM)
 4431		 * and R10 with # of drives divisible by 3.
4432 */
Don Brace2b08b3e2015-01-23 16:41:09 -06004433 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
Scott Teel6b80b182014-02-18 13:56:55 -06004434
4435 offload_to_mirror = dev->offload_to_mirror;
4436 raid_map_helper(map, offload_to_mirror,
4437 &map_index, &current_group);
4438 /* set mirror group to use next time */
4439 offload_to_mirror =
Don Brace2b08b3e2015-01-23 16:41:09 -06004440 (offload_to_mirror >=
4441 le16_to_cpu(map->layout_map_count) - 1)
Scott Teel6b80b182014-02-18 13:56:55 -06004442 ? 0 : offload_to_mirror + 1;
Scott Teel6b80b182014-02-18 13:56:55 -06004443 dev->offload_to_mirror = offload_to_mirror;
4444 /* Avoid direct use of dev->offload_to_mirror within this
4445 * function since multiple threads might simultaneously
 4446		 * increment it beyond the range of dev->layout_map_count - 1.
4447 */
4448 break;
4449 case HPSA_RAID_5:
4450 case HPSA_RAID_6:
Don Brace2b08b3e2015-01-23 16:41:09 -06004451 if (le16_to_cpu(map->layout_map_count) <= 1)
Scott Teel6b80b182014-02-18 13:56:55 -06004452 break;
4453
4454 /* Verify first and last block are in same RAID group */
4455 r5or6_blocks_per_row =
Don Brace2b08b3e2015-01-23 16:41:09 -06004456 le16_to_cpu(map->strip_size) *
4457 le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004458 BUG_ON(r5or6_blocks_per_row == 0);
Don Brace2b08b3e2015-01-23 16:41:09 -06004459 stripesize = r5or6_blocks_per_row *
4460 le16_to_cpu(map->layout_map_count);
Scott Teel6b80b182014-02-18 13:56:55 -06004461#if BITS_PER_LONG == 32
4462 tmpdiv = first_block;
4463 first_group = do_div(tmpdiv, stripesize);
4464 tmpdiv = first_group;
4465 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4466 first_group = tmpdiv;
4467 tmpdiv = last_block;
4468 last_group = do_div(tmpdiv, stripesize);
4469 tmpdiv = last_group;
4470 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4471 last_group = tmpdiv;
4472#else
4473 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4474 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
Scott Teel6b80b182014-02-18 13:56:55 -06004475#endif
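		/*
		 * Illustration (assumed numbers): with r5or6_blocks_per_row
		 * == 512 and layout_map_count == 2, stripesize is 1024; a
		 * request spanning blocks 1500-1520 maps both endpoints to
		 * offsets-within-stripe 476 and 496, i.e. group 0 for each,
		 * so it passes this check.
		 */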
Stephen M. Cameron000ff7c2014-03-13 17:12:50 -05004476 if (first_group != last_group)
Scott Teel6b80b182014-02-18 13:56:55 -06004477 return IO_ACCEL_INELIGIBLE;
4478
4479 /* Verify request is in a single row of RAID 5/6 */
4480#if BITS_PER_LONG == 32
4481 tmpdiv = first_block;
4482 (void) do_div(tmpdiv, stripesize);
4483 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4484 tmpdiv = last_block;
4485 (void) do_div(tmpdiv, stripesize);
4486 r5or6_last_row = r0_last_row = tmpdiv;
4487#else
4488 first_row = r5or6_first_row = r0_first_row =
4489 first_block / stripesize;
4490 r5or6_last_row = r0_last_row = last_block / stripesize;
4491#endif
4492 if (r5or6_first_row != r5or6_last_row)
4493 return IO_ACCEL_INELIGIBLE;
4494
4495
4496 /* Verify request is in a single column */
4497#if BITS_PER_LONG == 32
4498 tmpdiv = first_block;
4499 first_row_offset = do_div(tmpdiv, stripesize);
4500 tmpdiv = first_row_offset;
4501 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4502 r5or6_first_row_offset = first_row_offset;
4503 tmpdiv = last_block;
4504 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4505 tmpdiv = r5or6_last_row_offset;
4506 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4507 tmpdiv = r5or6_first_row_offset;
4508 (void) do_div(tmpdiv, map->strip_size);
4509 first_column = r5or6_first_column = tmpdiv;
4510 tmpdiv = r5or6_last_row_offset;
4511 (void) do_div(tmpdiv, map->strip_size);
4512 r5or6_last_column = tmpdiv;
4513#else
4514 first_row_offset = r5or6_first_row_offset =
4515 (u32)((first_block % stripesize) %
4516 r5or6_blocks_per_row);
4517
4518 r5or6_last_row_offset =
4519 (u32)((last_block % stripesize) %
4520 r5or6_blocks_per_row);
4521
4522 first_column = r5or6_first_column =
Don Brace2b08b3e2015-01-23 16:41:09 -06004523 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
Scott Teel6b80b182014-02-18 13:56:55 -06004524 r5or6_last_column =
Don Brace2b08b3e2015-01-23 16:41:09 -06004525 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
Scott Teel6b80b182014-02-18 13:56:55 -06004526#endif
4527 if (r5or6_first_column != r5or6_last_column)
4528 return IO_ACCEL_INELIGIBLE;
4529
4530 /* Request is eligible */
4531 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
Don Brace2b08b3e2015-01-23 16:41:09 -06004532 le16_to_cpu(map->row_cnt);
Scott Teel6b80b182014-02-18 13:56:55 -06004533
4534 map_index = (first_group *
Don Brace2b08b3e2015-01-23 16:41:09 -06004535 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
Scott Teel6b80b182014-02-18 13:56:55 -06004536 (map_row * total_disks_per_row) + first_column;
4537 break;
4538 default:
4539 return IO_ACCEL_INELIGIBLE;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004540 }
Scott Teel6b80b182014-02-18 13:56:55 -06004541
Stephen Cameron07543e02015-01-23 16:44:14 -06004542 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4543 return IO_ACCEL_INELIGIBLE;
4544
Don Brace03383732015-01-23 16:43:30 -06004545 c->phys_disk = dev->phys_disk[map_index];
4546
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004547 disk_handle = dd[map_index].ioaccel_handle;
Don Brace2b08b3e2015-01-23 16:41:09 -06004548 disk_block = le64_to_cpu(map->disk_starting_blk) +
4549 first_row * le16_to_cpu(map->strip_size) +
4550 (first_row_offset - first_column *
4551 le16_to_cpu(map->strip_size));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004552 disk_block_cnt = block_cnt;
4553
4554 /* handle differing logical/physical block sizes */
4555 if (map->phys_blk_shift) {
4556 disk_block <<= map->phys_blk_shift;
4557 disk_block_cnt <<= map->phys_blk_shift;
4558 }
4559 BUG_ON(disk_block_cnt > 0xffff);
4560
4561 /* build the new CDB for the physical disk I/O */
4562 if (disk_block > 0xffffffff) {
4563 cdb[0] = is_write ? WRITE_16 : READ_16;
4564 cdb[1] = 0;
4565 cdb[2] = (u8) (disk_block >> 56);
4566 cdb[3] = (u8) (disk_block >> 48);
4567 cdb[4] = (u8) (disk_block >> 40);
4568 cdb[5] = (u8) (disk_block >> 32);
4569 cdb[6] = (u8) (disk_block >> 24);
4570 cdb[7] = (u8) (disk_block >> 16);
4571 cdb[8] = (u8) (disk_block >> 8);
4572 cdb[9] = (u8) (disk_block);
4573 cdb[10] = (u8) (disk_block_cnt >> 24);
4574 cdb[11] = (u8) (disk_block_cnt >> 16);
4575 cdb[12] = (u8) (disk_block_cnt >> 8);
4576 cdb[13] = (u8) (disk_block_cnt);
4577 cdb[14] = 0;
4578 cdb[15] = 0;
4579 cdb_len = 16;
4580 } else {
4581 cdb[0] = is_write ? WRITE_10 : READ_10;
4582 cdb[1] = 0;
4583 cdb[2] = (u8) (disk_block >> 24);
4584 cdb[3] = (u8) (disk_block >> 16);
4585 cdb[4] = (u8) (disk_block >> 8);
4586 cdb[5] = (u8) (disk_block);
4587 cdb[6] = 0;
4588 cdb[7] = (u8) (disk_block_cnt >> 8);
4589 cdb[8] = (u8) (disk_block_cnt);
4590 cdb[9] = 0;
4591 cdb_len = 10;
4592 }
4593 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
Don Brace03383732015-01-23 16:43:30 -06004594 dev->scsi3addr,
4595 dev->phys_disk[map_index]);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004596}
4597
Webb Scales25163bd2015-04-23 09:32:00 -05004598/*
 4599	 * Submit commands down the "normal" RAID stack path.
 4600	 * All callers to hpsa_ciss_submit must check lockup_detected
 4601	 * beforehand, both before (optionally) and after calling cmd_alloc.
4602 */
Stephen Cameron574f05d2015-01-23 16:43:20 -06004603static int hpsa_ciss_submit(struct ctlr_info *h,
4604 struct CommandList *c, struct scsi_cmnd *cmd,
4605 unsigned char scsi3addr[])
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004606{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004607 cmd->host_scribble = (unsigned char *) c;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004608 c->cmd_type = CMD_SCSI;
4609 c->scsi_cmd = cmd;
4610 c->Header.ReplyQueue = 0; /* unused in simple mode */
4611 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
Don Bracef2405db2015-01-23 16:43:09 -06004612 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004613
4614 /* Fill in the request block... */
4615
4616 c->Request.Timeout = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004617 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4618 c->Request.CDBLen = cmd->cmd_len;
4619 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004620 switch (cmd->sc_data_direction) {
4621 case DMA_TO_DEVICE:
Stephen M. Camerona505b862014-11-14 17:27:04 -06004622 c->Request.type_attr_dir =
4623 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004624 break;
4625 case DMA_FROM_DEVICE:
Stephen M. Camerona505b862014-11-14 17:27:04 -06004626 c->Request.type_attr_dir =
4627 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004628 break;
4629 case DMA_NONE:
Stephen M. Camerona505b862014-11-14 17:27:04 -06004630 c->Request.type_attr_dir =
4631 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004632 break;
4633 case DMA_BIDIRECTIONAL:
4634 /* This can happen if a buggy application does a scsi passthru
 4635		 * and sets both inlen and outlen to non-zero. (see
 4636		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command())
4637 */
4638
Stephen M. Camerona505b862014-11-14 17:27:04 -06004639 c->Request.type_attr_dir =
4640 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004641 /* This is technically wrong, and hpsa controllers should
4642 * reject it with CMD_INVALID, which is the most correct
4643 * response, but non-fibre backends appear to let it
4644 * slide by, and give the same results as if this field
4645 * were set correctly. Either way is acceptable for
4646 * our purposes here.
4647 */
4648
4649 break;
4650
4651 default:
4652 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4653 cmd->sc_data_direction);
4654 BUG();
4655 break;
4656 }
4657
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06004658 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
Webb Scales73153fe2015-04-23 09:35:04 -05004659 hpsa_cmd_resolve_and_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004660 return SCSI_MLQUEUE_HOST_BUSY;
4661 }
4662 enqueue_cmd_and_start_io(h, c);
4663 /* the cmd'll come back via intr handler in complete_scsi_command() */
4664 return 0;
4665}
4666
Stephen Cameron360c73b2015-04-23 09:32:32 -05004667static void hpsa_cmd_init(struct ctlr_info *h, int index,
4668 struct CommandList *c)
4669{
4670 dma_addr_t cmd_dma_handle, err_dma_handle;
4671
4672 /* Zero out all of commandlist except the last field, refcount */
4673 memset(c, 0, offsetof(struct CommandList, refcount));
4674 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
4675 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4676 c->err_info = h->errinfo_pool + index;
4677 memset(c->err_info, 0, sizeof(*c->err_info));
4678 err_dma_handle = h->errinfo_pool_dhandle
4679 + index * sizeof(*c->err_info);
4680 c->cmdindex = index;
4681 c->busaddr = (u32) cmd_dma_handle;
4682 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4683 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4684 c->h = h;
Webb Scalesa58e7e52015-04-23 09:34:16 -05004685 c->scsi_cmd = SCSI_CMD_IDLE;
Stephen Cameron360c73b2015-04-23 09:32:32 -05004686}
4687
4688static void hpsa_preinitialize_commands(struct ctlr_info *h)
4689{
4690 int i;
4691
4692 for (i = 0; i < h->nr_cmds; i++) {
4693 struct CommandList *c = h->cmd_pool + i;
4694
4695 hpsa_cmd_init(h, i, c);
4696 atomic_set(&c->refcount, 0);
4697 }
4698}
4699
4700static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
4701 struct CommandList *c)
4702{
4703 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4704
Webb Scales73153fe2015-04-23 09:35:04 -05004705 BUG_ON(c->cmdindex != index);
4706
Stephen Cameron360c73b2015-04-23 09:32:32 -05004707 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4708 memset(c->err_info, 0, sizeof(*c->err_info));
4709 c->busaddr = (u32) cmd_dma_handle;
4710}
4711
Webb Scales592a0ad2015-04-23 09:32:48 -05004712static int hpsa_ioaccel_submit(struct ctlr_info *h,
4713 struct CommandList *c, struct scsi_cmnd *cmd,
4714 unsigned char *scsi3addr)
4715{
4716 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4717 int rc = IO_ACCEL_INELIGIBLE;
4718
4719 cmd->host_scribble = (unsigned char *) c;
4720
4721 if (dev->offload_enabled) {
4722 hpsa_cmd_init(h, c->cmdindex, c);
4723 c->cmd_type = CMD_SCSI;
4724 c->scsi_cmd = cmd;
4725 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4726 if (rc < 0) /* scsi_dma_map failed. */
4727 rc = SCSI_MLQUEUE_HOST_BUSY;
Joe Handzika3144e02015-04-23 09:32:59 -05004728 } else if (dev->hba_ioaccel_enabled) {
Webb Scales592a0ad2015-04-23 09:32:48 -05004729 hpsa_cmd_init(h, c->cmdindex, c);
4730 c->cmd_type = CMD_SCSI;
4731 c->scsi_cmd = cmd;
4732 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4733 if (rc < 0) /* scsi_dma_map failed. */
4734 rc = SCSI_MLQUEUE_HOST_BUSY;
4735 }
4736 return rc;
4737}
4738
Don Brace080ef1c2015-01-23 16:43:25 -06004739static void hpsa_command_resubmit_worker(struct work_struct *work)
4740{
4741 struct scsi_cmnd *cmd;
4742 struct hpsa_scsi_dev_t *dev;
Webb Scales8a0ff922015-04-23 09:34:11 -05004743 struct CommandList *c = container_of(work, struct CommandList, work);
Don Brace080ef1c2015-01-23 16:43:25 -06004744
4745 cmd = c->scsi_cmd;
4746 dev = cmd->device->hostdata;
4747 if (!dev) {
4748 cmd->result = DID_NO_CONNECT << 16;
Webb Scales8a0ff922015-04-23 09:34:11 -05004749 return hpsa_cmd_free_and_done(c->h, c, cmd);
Don Brace080ef1c2015-01-23 16:43:25 -06004750 }
Webb Scalesd604f532015-04-23 09:35:22 -05004751 if (c->reset_pending)
4752 return hpsa_cmd_resolve_and_free(c->h, c);
Webb Scalesa58e7e52015-04-23 09:34:16 -05004753 if (c->abort_pending)
4754 return hpsa_cmd_abort_and_free(c->h, c, cmd);
Webb Scales592a0ad2015-04-23 09:32:48 -05004755 if (c->cmd_type == CMD_IOACCEL2) {
4756 struct ctlr_info *h = c->h;
4757 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
4758 int rc;
4759
4760 if (c2->error_data.serv_response ==
4761 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
4762 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
4763 if (rc == 0)
4764 return;
4765 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4766 /*
4767 * If we get here, it means dma mapping failed.
4768 * Try again via scsi mid layer, which will
4769 * then get SCSI_MLQUEUE_HOST_BUSY.
4770 */
4771 cmd->result = DID_IMM_RETRY << 16;
Webb Scales8a0ff922015-04-23 09:34:11 -05004772 return hpsa_cmd_free_and_done(h, c, cmd);
Webb Scales592a0ad2015-04-23 09:32:48 -05004773 }
4774 /* else, fall thru and resubmit down CISS path */
4775 }
4776 }
Stephen Cameron360c73b2015-04-23 09:32:32 -05004777 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
Don Brace080ef1c2015-01-23 16:43:25 -06004778 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4779 /*
4780 * If we get here, it means dma mapping failed. Try
4781 * again via scsi mid layer, which will then get
4782 * SCSI_MLQUEUE_HOST_BUSY.
Webb Scales592a0ad2015-04-23 09:32:48 -05004783 *
4784 * hpsa_ciss_submit will have already freed c
4785 * if it encountered a dma mapping failure.
Don Brace080ef1c2015-01-23 16:43:25 -06004786 */
4787 cmd->result = DID_IMM_RETRY << 16;
4788 cmd->scsi_done(cmd);
4789 }
4790}
4791
Stephen Cameron574f05d2015-01-23 16:43:20 -06004792/* Running in struct Scsi_Host->host_lock less mode */
4793static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4794{
4795 struct ctlr_info *h;
4796 struct hpsa_scsi_dev_t *dev;
4797 unsigned char scsi3addr[8];
4798 struct CommandList *c;
4799 int rc = 0;
4800
4801 /* Get the ptr to our adapter structure out of cmd->host. */
4802 h = sdev_to_hba(cmd->device);
Webb Scales73153fe2015-04-23 09:35:04 -05004803
4804 BUG_ON(cmd->request->tag < 0);
4805
Stephen Cameron574f05d2015-01-23 16:43:20 -06004806 dev = cmd->device->hostdata;
4807 if (!dev) {
4808 cmd->result = DID_NO_CONNECT << 16;
4809 cmd->scsi_done(cmd);
4810 return 0;
4811 }
Webb Scales73153fe2015-04-23 09:35:04 -05004812
Stephen Cameron574f05d2015-01-23 16:43:20 -06004813 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4814
4815 if (unlikely(lockup_detected(h))) {
Webb Scales25163bd2015-04-23 09:32:00 -05004816 cmd->result = DID_NO_CONNECT << 16;
Stephen Cameron574f05d2015-01-23 16:43:20 -06004817 cmd->scsi_done(cmd);
4818 return 0;
4819 }
Webb Scales73153fe2015-04-23 09:35:04 -05004820 c = cmd_tagged_alloc(h, cmd);
Stephen Cameron574f05d2015-01-23 16:43:20 -06004821
Stephen Cameron407863c2015-01-23 16:44:19 -06004822 /*
4823 * Call alternate submit routine for I/O accelerated commands.
Stephen Cameron574f05d2015-01-23 16:43:20 -06004824 * Retries always go down the normal I/O path.
4825 */
4826 if (likely(cmd->retries == 0 &&
4827 cmd->request->cmd_type == REQ_TYPE_FS &&
4828 h->acciopath_status)) {
Webb Scales592a0ad2015-04-23 09:32:48 -05004829 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
4830 if (rc == 0)
4831 return 0;
4832 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
Webb Scales73153fe2015-04-23 09:35:04 -05004833 hpsa_cmd_resolve_and_free(h, c);
Webb Scales592a0ad2015-04-23 09:32:48 -05004834 return SCSI_MLQUEUE_HOST_BUSY;
Stephen Cameron574f05d2015-01-23 16:43:20 -06004835 }
4836 }
4837 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4838}
4839
Webb Scales8ebc9242015-01-23 16:44:50 -06004840static void hpsa_scan_complete(struct ctlr_info *h)
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004841{
4842 unsigned long flags;
4843
Webb Scales8ebc9242015-01-23 16:44:50 -06004844 spin_lock_irqsave(&h->scan_lock, flags);
4845 h->scan_finished = 1;
4846 wake_up_all(&h->scan_wait_queue);
4847 spin_unlock_irqrestore(&h->scan_lock, flags);
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004848}
4849
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004850static void hpsa_scan_start(struct Scsi_Host *sh)
4851{
4852 struct ctlr_info *h = shost_to_hba(sh);
4853 unsigned long flags;
4854
Webb Scales8ebc9242015-01-23 16:44:50 -06004855 /*
4856 * Don't let rescans be initiated on a controller known to be locked
4857 * up. If the controller locks up *during* a rescan, that thread is
4858 * probably hosed, but at least we can prevent new rescan threads from
4859 * piling up on a locked up controller.
4860 */
4861 if (unlikely(lockup_detected(h)))
4862 return hpsa_scan_complete(h);
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004863
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004864 /* wait until any scan already in progress is finished. */
4865 while (1) {
4866 spin_lock_irqsave(&h->scan_lock, flags);
4867 if (h->scan_finished)
4868 break;
4869 spin_unlock_irqrestore(&h->scan_lock, flags);
4870 wait_event(h->scan_wait_queue, h->scan_finished);
4871 /* Note: We don't need to worry about a race between this
4872 * thread and driver unload because the midlayer will
4873 * have incremented the reference count, so unload won't
4874 * happen if we're in here.
4875 */
4876 }
4877 h->scan_finished = 0; /* mark scan as in progress */
4878 spin_unlock_irqrestore(&h->scan_lock, flags);
4879
Webb Scales8ebc9242015-01-23 16:44:50 -06004880 if (unlikely(lockup_detected(h)))
4881 return hpsa_scan_complete(h);
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004882
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004883 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4884
Webb Scales8ebc9242015-01-23 16:44:50 -06004885 hpsa_scan_complete(h);
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004886}
4887
Don Brace7c0a0222015-01-23 16:41:30 -06004888static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4889{
Don Brace03383732015-01-23 16:43:30 -06004890 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4891
4892 if (!logical_drive)
4893 return -ENODEV;
Don Brace7c0a0222015-01-23 16:41:30 -06004894
4895 if (qdepth < 1)
4896 qdepth = 1;
Don Brace03383732015-01-23 16:43:30 -06004897 else if (qdepth > logical_drive->queue_depth)
4898 qdepth = logical_drive->queue_depth;
4899
4900 return scsi_change_queue_depth(sdev, qdepth);
Don Brace7c0a0222015-01-23 16:41:30 -06004901}
4902
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004903static int hpsa_scan_finished(struct Scsi_Host *sh,
4904 unsigned long elapsed_time)
4905{
4906 struct ctlr_info *h = shost_to_hba(sh);
4907 unsigned long flags;
4908 int finished;
4909
4910 spin_lock_irqsave(&h->scan_lock, flags);
4911 finished = h->scan_finished;
4912 spin_unlock_irqrestore(&h->scan_lock, flags);
4913 return finished;
4914}
4915
Robert Elliott2946e822015-04-23 09:35:09 -05004916static int hpsa_scsi_host_alloc(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004917{
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004918 struct Scsi_Host *sh;
4919 int error;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004920
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004921 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
Robert Elliott2946e822015-04-23 09:35:09 -05004922 if (sh == NULL) {
4923 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
4924 return -ENOMEM;
4925 }
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004926
4927 sh->io_port = 0;
4928 sh->n_io_port = 0;
4929 sh->this_id = -1;
4930 sh->max_channel = 3;
4931 sh->max_cmd_len = MAX_COMMAND_SIZE;
4932 sh->max_lun = HPSA_MAX_LUN;
4933 sh->max_id = HPSA_MAX_LUN;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05004934 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
Don Brace03383732015-01-23 16:43:30 -06004935 sh->cmd_per_lun = sh->can_queue;
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004936 sh->sg_tablesize = h->maxsgentries;
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004937 sh->hostdata[0] = (unsigned long) h;
4938 sh->irq = h->intr[h->intr_mode];
4939 sh->unique_id = sh->irq;
Webb Scales73153fe2015-04-23 09:35:04 -05004940 error = scsi_init_shared_tag_map(sh, sh->can_queue);
4941 if (error) {
4942 dev_err(&h->pdev->dev,
4943 "%s: scsi_init_shared_tag_map failed for controller %d\n",
4944 __func__, h->ctlr);
Robert Elliott2946e822015-04-23 09:35:09 -05004945 scsi_host_put(sh);
4946 return error;
Webb Scales73153fe2015-04-23 09:35:04 -05004947 }
Robert Elliott2946e822015-04-23 09:35:09 -05004948 h->scsi_host = sh;
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004949 return 0;
Robert Elliott2946e822015-04-23 09:35:09 -05004950}
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004951
Robert Elliott2946e822015-04-23 09:35:09 -05004952static int hpsa_scsi_add_host(struct ctlr_info *h)
4953{
4954 int rv;
4955
4956 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
4957 if (rv) {
4958 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
4959 return rv;
4960 }
4961 scsi_scan_host(h->scsi_host);
4962 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004963}
4964
Webb Scalesb69324f2015-04-23 09:34:22 -05004965/*
Webb Scales73153fe2015-04-23 09:35:04 -05004966 * The block layer has already gone to the trouble of picking out a unique,
4967 * small-integer tag for this request. We use an offset from that value as
4968 * an index to select our command block. (The offset allows us to reserve the
4969 * low-numbered entries for our own uses.)
4970 */
4971static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
4972{
4973 int idx = scmd->request->tag;
4974
4975 if (idx < 0)
4976 return idx;
4977
4978 /* Offset to leave space for internal cmds. */
 4979	return idx + HPSA_NRESERVED_CMDS;
4980}
4981
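/*
 * Illustration (assumed value): with HPSA_NRESERVED_CMDS == 16, a block
 * layer tag of 0 selects command block 16, leaving blocks 0-15 for the
 * driver's internal commands.
 */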
4982/*
Webb Scalesb69324f2015-04-23 09:34:22 -05004983 * Send a TEST_UNIT_READY command to the specified LUN using the specified
4984 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
4985 */
4986static int hpsa_send_test_unit_ready(struct ctlr_info *h,
4987 struct CommandList *c, unsigned char lunaddr[],
4988 int reply_queue)
4989{
4990 int rc;
4991
4992 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4993 (void) fill_cmd(c, TEST_UNIT_READY, h,
4994 NULL, 0, 0, lunaddr, TYPE_CMD);
4995 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
4996 if (rc)
4997 return rc;
4998 /* no unmap needed here because no data xfer. */
4999
5000 /* Check if the unit is already ready. */
5001 if (c->err_info->CommandStatus == CMD_SUCCESS)
5002 return 0;
5003
5004 /*
5005 * The first command sent after reset will receive "unit attention" to
 5006	 * indicate that the LUN has been reset; this is actually what we're
 5007	 * looking for (but success is good too).
5008 */
5009 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5010 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5011 (c->err_info->SenseInfo[2] == NO_SENSE ||
5012 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5013 return 0;
5014
5015 return 1;
5016}
5017
5018/*
5019 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5020 * returns zero when the unit is ready, and non-zero when giving up.
5021 */
5022static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5023 struct CommandList *c,
5024 unsigned char lunaddr[], int reply_queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005025{
Tomas Henzl89193582014-02-21 16:25:05 -06005026 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005027 int count = 0;
5028 int waittime = 1; /* seconds */
Webb Scalesb69324f2015-04-23 09:34:22 -05005029
5030 /* Send test unit ready until device ready, or give up. */
5031 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5032
5033 /*
5034 * Wait for a bit. do this first, because if we send
5035 * the TUR right away, the reset will just abort it.
5036 */
5037 msleep(1000 * waittime);
5038
5039 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5040 if (!rc)
5041 break;
5042
5043 /* Increase wait time with each try, up to a point. */
5044 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5045 waittime *= 2;
5046
5047 dev_warn(&h->pdev->dev,
5048 "waiting %d secs for device to become ready.\n",
5049 waittime);
5050 }
5051
5052 return rc;
5053}
5054
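/*
 * Illustration of the retry pacing above: successive waits run 1, 2, 4, ...
 * seconds, doubling until the interval reaches HPSA_MAX_WAIT_INTERVAL_SECS,
 * after which every remaining retry (up to HPSA_TUR_RETRY_LIMIT attempts in
 * total) waits the capped interval before resending the TUR.
 */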
5055static int wait_for_device_to_become_ready(struct ctlr_info *h,
5056 unsigned char lunaddr[],
5057 int reply_queue)
5058{
5059 int first_queue;
5060 int last_queue;
5061 int rq;
5062 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005063 struct CommandList *c;
5064
Stephen Cameron45fcb862015-01-23 16:43:04 -06005065 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005066
Webb Scalesb69324f2015-04-23 09:34:22 -05005067 /*
5068 * If no specific reply queue was requested, then send the TUR
5069 * repeatedly, requesting a reply on each reply queue; otherwise execute
5070 * the loop exactly once using only the specified queue.
5071 */
5072 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5073 first_queue = 0;
5074 last_queue = h->nreply_queues - 1;
5075 } else {
5076 first_queue = reply_queue;
5077 last_queue = reply_queue;
5078 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005079
Webb Scalesb69324f2015-04-23 09:34:22 -05005080 for (rq = first_queue; rq <= last_queue; rq++) {
5081 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
Webb Scales25163bd2015-04-23 09:32:00 -05005082 if (rc)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005083 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005084 }
5085
5086 if (rc)
5087 dev_warn(&h->pdev->dev, "giving up on device.\n");
5088 else
5089 dev_warn(&h->pdev->dev, "device is ready.\n");
5090
Stephen Cameron45fcb862015-01-23 16:43:04 -06005091 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005092 return rc;
5093}
5094
5095/* Need at least one of these error handlers to keep ../scsi/hosts.c from
5096 * complaining. Doing a host- or bus-reset can't do anything good here.
5097 */
5098static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5099{
5100 int rc;
5101 struct ctlr_info *h;
5102 struct hpsa_scsi_dev_t *dev;
Dan Carpenter2dc127b2015-06-04 17:47:56 +03005103 char msg[48];
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005104
 5105	/* find the controller to which the command was sent */
5106 h = sdev_to_hba(scsicmd->device);
5107 if (h == NULL) /* paranoia */
5108 return FAILED;
Don Bracee3458932015-01-23 16:44:24 -06005109
5110 if (lockup_detected(h))
5111 return FAILED;
5112
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005113 dev = scsicmd->device->hostdata;
5114 if (!dev) {
Webb Scalesd604f532015-04-23 09:35:22 -05005115 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005116 return FAILED;
5117 }
Webb Scales25163bd2015-04-23 09:32:00 -05005118
5119 /* if controller locked up, we can guarantee command won't complete */
5120 if (lockup_detected(h)) {
Dan Carpenter2dc127b2015-06-04 17:47:56 +03005121 snprintf(msg, sizeof(msg),
5122 "cmd %d RESET FAILED, lockup detected",
5123 hpsa_get_cmd_index(scsicmd));
Webb Scales73153fe2015-04-23 09:35:04 -05005124 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
Webb Scales25163bd2015-04-23 09:32:00 -05005125 return FAILED;
5126 }
5127
5128 /* this reset request might be the result of a lockup; check */
5129 if (detect_controller_lockup(h)) {
Dan Carpenter2dc127b2015-06-04 17:47:56 +03005130 snprintf(msg, sizeof(msg),
5131 "cmd %d RESET FAILED, new lockup detected",
5132 hpsa_get_cmd_index(scsicmd));
Webb Scales73153fe2015-04-23 09:35:04 -05005133 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
Webb Scales25163bd2015-04-23 09:32:00 -05005134 return FAILED;
5135 }
5136
Webb Scalesd604f532015-04-23 09:35:22 -05005137 /* Do not attempt on controller */
5138 if (is_hba_lunid(dev->scsi3addr))
5139 return SUCCESS;
5140
Webb Scales25163bd2015-04-23 09:32:00 -05005141 hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");
5142
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005143 /* send a reset to the SCSI LUN which the command was sent to */
Webb Scalesd604f532015-04-23 09:35:22 -05005144 rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
5145 DEFAULT_REPLY_QUEUE);
Dan Carpenter2dc127b2015-06-04 17:47:56 +03005146 snprintf(msg, sizeof(msg), "reset %s",
5147 rc == 0 ? "completed successfully" : "failed");
Webb Scalesd604f532015-04-23 09:35:22 -05005148 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5149 return rc == 0 ? SUCCESS : FAILED;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005150}
5151
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005152static void swizzle_abort_tag(u8 *tag)
5153{
5154 u8 original_tag[8];
5155
5156 memcpy(original_tag, tag, 8);
5157 tag[0] = original_tag[3];
5158 tag[1] = original_tag[2];
5159 tag[2] = original_tag[1];
5160 tag[3] = original_tag[0];
5161 tag[4] = original_tag[7];
5162 tag[5] = original_tag[6];
5163 tag[6] = original_tag[5];
5164 tag[7] = original_tag[4];
5165}
5166
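/*
 * Worked example for swizzle_abort_tag() (illustrative bytes only): an
 * 8-byte tag 01 02 03 04 05 06 07 08 becomes 04 03 02 01 08 07 06 05,
 * i.e. each 4-byte half is byte-reversed in place to match the byte
 * order that some firmware expects for abort tags.
 */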
Scott Teel17eb87d2014-02-18 13:55:28 -06005167static void hpsa_get_tag(struct ctlr_info *h,
Don Brace2b08b3e2015-01-23 16:41:09 -06005168 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
Scott Teel17eb87d2014-02-18 13:55:28 -06005169{
Don Brace2b08b3e2015-01-23 16:41:09 -06005170 u64 tag;
Scott Teel17eb87d2014-02-18 13:55:28 -06005171 if (c->cmd_type == CMD_IOACCEL1) {
5172 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5173 &h->ioaccel_cmd_pool[c->cmdindex];
Don Brace2b08b3e2015-01-23 16:41:09 -06005174 tag = le64_to_cpu(cm1->tag);
5175 *tagupper = cpu_to_le32(tag >> 32);
5176 *taglower = cpu_to_le32(tag);
Scott Teel54b6e9e2014-02-18 13:56:45 -06005177 return;
Scott Teel17eb87d2014-02-18 13:55:28 -06005178 }
Scott Teel54b6e9e2014-02-18 13:56:45 -06005179 if (c->cmd_type == CMD_IOACCEL2) {
5180 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5181 &h->ioaccel2_cmd_pool[c->cmdindex];
Scott Teeldd0e19f2014-02-18 13:57:31 -06005182 /* upper tag not used in ioaccel2 mode */
5183 memset(tagupper, 0, sizeof(*tagupper));
5184 *taglower = cm2->Tag;
Scott Teel54b6e9e2014-02-18 13:56:45 -06005185 return;
5186 }
Don Brace2b08b3e2015-01-23 16:41:09 -06005187 tag = le64_to_cpu(c->Header.tag);
5188 *tagupper = cpu_to_le32(tag >> 32);
5189 *taglower = cpu_to_le32(tag);
Scott Teel17eb87d2014-02-18 13:55:28 -06005190}
5191
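/*
 * Example of the split above: a 64-bit tag 0x0123456789abcdef yields
 * *tagupper = 0x01234567 and *taglower = 0x89abcdef. (ioaccel2 commands
 * carry only a 32-bit tag, so tagupper is zeroed for those.)
 */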
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005192static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005193 struct CommandList *abort, int reply_queue)
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005194{
5195 int rc = IO_OK;
5196 struct CommandList *c;
5197 struct ErrorInfo *ei;
Don Brace2b08b3e2015-01-23 16:41:09 -06005198 __le32 tagupper, taglower;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005199
Stephen Cameron45fcb862015-01-23 16:43:04 -06005200 c = cmd_alloc(h);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005201
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005202 /* fill_cmd can't fail here, no buffer to map */
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005203 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005204 0, 0, scsi3addr, TYPE_MSG);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005205 if (h->needs_abort_tags_swizzled)
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005206 swizzle_abort_tag(&c->Request.CDB[4]);
Webb Scales25163bd2015-04-23 09:32:00 -05005207 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
Scott Teel17eb87d2014-02-18 13:55:28 -06005208 hpsa_get_tag(h, abort, &taglower, &tagupper);
Webb Scales25163bd2015-04-23 09:32:00 -05005209 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
Scott Teel17eb87d2014-02-18 13:55:28 -06005210 __func__, tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005211 /* no unmap needed here because no data xfer. */
5212
5213 ei = c->err_info;
5214 switch (ei->CommandStatus) {
5215 case CMD_SUCCESS:
5216 break;
Stephen Cameron9437ac42015-04-23 09:32:16 -05005217 case CMD_TMF_STATUS:
5218 rc = hpsa_evaluate_tmf_status(h, c);
5219 break;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005220 case CMD_UNABORTABLE: /* Very common, don't make noise. */
5221 rc = -1;
5222 break;
5223 default:
5224 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
Scott Teel17eb87d2014-02-18 13:55:28 -06005225 __func__, tagupper, taglower);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06005226 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005227 rc = -1;
5228 break;
5229 }
Stephen Cameron45fcb862015-01-23 16:43:04 -06005230 cmd_free(h, c);
Scott Teeldd0e19f2014-02-18 13:57:31 -06005231 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5232 __func__, tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005233 return rc;
5234}
5235
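/*
 * Note on hpsa_send_abort() above: it returns 0 (IO_OK) only when the
 * abort message itself completed with CMD_SUCCESS, or with a TMF status
 * that hpsa_evaluate_tmf_status() accepts; CMD_UNABORTABLE (common,
 * e.g. the command already completed) and all other statuses map to -1.
 */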
Stephen Cameron8be986c2015-04-23 09:34:06 -05005236static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5237 struct CommandList *command_to_abort, int reply_queue)
5238{
5239 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5240 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5241 struct io_accel2_cmd *c2a =
5242 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
Webb Scalesa58e7e52015-04-23 09:34:16 -05005243 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
Stephen Cameron8be986c2015-04-23 09:34:06 -05005244 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5245
5246 /*
5247 * We're overlaying struct hpsa_tmf_struct on top of something which
5248 * was allocated as a struct io_accel2_cmd, so we better be sure it
5249 * actually fits, and doesn't overrun the error info space.
5250 */
5251 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5252 sizeof(struct io_accel2_cmd));
5253 BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5254 offsetof(struct hpsa_tmf_struct, error_len) +
5255 sizeof(ac->error_len));
5256
5257 c->cmd_type = IOACCEL2_TMF;
Webb Scalesa58e7e52015-04-23 09:34:16 -05005258 c->scsi_cmd = SCSI_CMD_BUSY;
5259
Stephen Cameron8be986c2015-04-23 09:34:06 -05005260 /* Adjust the DMA address to point to the accelerated command buffer */
5261 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5262 (c->cmdindex * sizeof(struct io_accel2_cmd));
5263 BUG_ON(c->busaddr & 0x0000007F);
5264
5265	memset(ac, 0, sizeof(*c2)); /* clear the whole io_accel2_cmd that ac overlays */
5266 ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5267 ac->reply_queue = reply_queue;
5268 ac->tmf = IOACCEL2_TMF_ABORT;
5269 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5270 memset(ac->lun_id, 0, sizeof(ac->lun_id));
5271 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5272 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5273 ac->error_ptr = cpu_to_le64(c->busaddr +
5274 offsetof(struct io_accel2_cmd, error_data));
5275 ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5276}
5277
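/*
 * Layout note for setup_ioaccel2_abort_cmd() above: the TMF request is
 * built in the command's own io_accel2_cmd pool slot, so c->busaddr is
 * recomputed as pool base + (cmdindex * sizeof(struct io_accel2_cmd));
 * the BUG_ON verifies the required 128-byte alignment of that address.
 */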
Scott Teel54b6e9e2014-02-18 13:56:45 -06005278/* ioaccel2 path firmware cannot handle abort task requests.
5279 * Change abort requests to physical target reset, and send to the
5280 * address of the physical disk used for the ioaccel 2 command.
5281 * Return 0 on success (IO_OK)
5282 * -1 on failure
5283 */
5284
5285static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
Webb Scales25163bd2015-04-23 09:32:00 -05005286 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
Scott Teel54b6e9e2014-02-18 13:56:45 -06005287{
5288 int rc = IO_OK;
5289 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5290 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5291 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5292 unsigned char *psa = &phys_scsi3addr[0];
5293
5294 /* Get a pointer to the hpsa logical device. */
Stephen Cameron7fa30302015-01-23 16:44:30 -06005295 scmd = abort->scsi_cmd;
Scott Teel54b6e9e2014-02-18 13:56:45 -06005296 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5297 if (dev == NULL) {
5298 dev_warn(&h->pdev->dev,
5299 "Cannot abort: no device pointer for command.\n");
5300 return -1; /* not abortable */
5301 }
5302
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06005303 if (h->raid_offload_debug > 0)
5304 dev_info(&h->pdev->dev,
Webb Scales0d96ef52015-04-23 09:31:55 -05005305 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06005306 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
Webb Scales0d96ef52015-04-23 09:31:55 -05005307 "Reset as abort",
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06005308 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5309 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5310
Scott Teel54b6e9e2014-02-18 13:56:45 -06005311 if (!dev->offload_enabled) {
5312 dev_warn(&h->pdev->dev,
5313 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5314 return -1; /* not abortable */
5315 }
5316
5317 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
5318 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5319 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5320 return -1; /* not abortable */
5321 }
5322
5323 /* send the reset */
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06005324 if (h->raid_offload_debug > 0)
5325 dev_info(&h->pdev->dev,
5326 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5327 psa[0], psa[1], psa[2], psa[3],
5328 psa[4], psa[5], psa[6], psa[7]);
Webb Scalesd604f532015-04-23 09:35:22 -05005329 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
Scott Teel54b6e9e2014-02-18 13:56:45 -06005330 if (rc != 0) {
5331 dev_warn(&h->pdev->dev,
5332 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5333 psa[0], psa[1], psa[2], psa[3],
5334 psa[4], psa[5], psa[6], psa[7]);
5335 return rc; /* failed to reset */
5336 }
5337
5338 /* wait for device to recover */
Webb Scalesb69324f2015-04-23 09:34:22 -05005339 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
Scott Teel54b6e9e2014-02-18 13:56:45 -06005340 dev_warn(&h->pdev->dev,
5341 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5342 psa[0], psa[1], psa[2], psa[3],
5343 psa[4], psa[5], psa[6], psa[7]);
5344 return -1; /* failed to recover */
5345 }
5346
5347 /* device recovered */
5348 dev_info(&h->pdev->dev,
5349 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5350 psa[0], psa[1], psa[2], psa[3],
5351 psa[4], psa[5], psa[6], psa[7]);
5352
5353 return rc; /* success */
5354}
5355
Stephen Cameron8be986c2015-04-23 09:34:06 -05005356static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5357 struct CommandList *abort, int reply_queue)
5358{
5359 int rc = IO_OK;
5360 struct CommandList *c;
5361 __le32 taglower, tagupper;
5362 struct hpsa_scsi_dev_t *dev;
5363 struct io_accel2_cmd *c2;
5364
5365 dev = abort->scsi_cmd->device->hostdata;
5366 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5367 return -1;
5368
5369 c = cmd_alloc(h);
5370 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5371 c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5372 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5373 hpsa_get_tag(h, abort, &taglower, &tagupper);
5374 dev_dbg(&h->pdev->dev,
5375 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5376 __func__, tagupper, taglower);
5377 /* no unmap needed here because no data xfer. */
5378
5379 dev_dbg(&h->pdev->dev,
5380 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5381 __func__, tagupper, taglower, c2->error_data.serv_response);
5382 switch (c2->error_data.serv_response) {
5383 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5384 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5385 rc = 0;
5386 break;
5387 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5388 case IOACCEL2_SERV_RESPONSE_FAILURE:
5389 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5390 rc = -1;
5391 break;
5392 default:
5393 dev_warn(&h->pdev->dev,
5394 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5395 __func__, tagupper, taglower,
5396 c2->error_data.serv_response);
5397 rc = -1;
5398 }
5399 cmd_free(h, c);
5400 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5401 tagupper, taglower);
5402 return rc;
5403}
5404
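/*
 * Service response decoding used above, for reference:
 *	TMF_COMPLETE, TMF_SUCCESS		-> 0 (abort succeeded)
 *	TMF_REJECTED, FAILURE, TMF_WRONG_LUN	-> -1
 *	any other value				-> -1, after a warning
 */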
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005405static int hpsa_send_abort_both_ways(struct ctlr_info *h,
Webb Scales25163bd2015-04-23 09:32:00 -05005406 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005407{
Stephen Cameron8be986c2015-04-23 09:34:06 -05005408 /*
5409	 * ioaccel mode 2 commands should be aborted via the
Scott Teel54b6e9e2014-02-18 13:56:45 -06005410	 * accelerated path, since the RAID path is unaware of these commands,
Stephen Cameron8be986c2015-04-23 09:34:06 -05005411 * but not all underlying firmware can handle abort TMF.
5412 * Change abort to physical device reset when abort TMF is unsupported.
Scott Teel54b6e9e2014-02-18 13:56:45 -06005413 */
Stephen Cameron8be986c2015-04-23 09:34:06 -05005414 if (abort->cmd_type == CMD_IOACCEL2) {
5415 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5416 return hpsa_send_abort_ioaccel2(h, abort,
5417 reply_queue);
5418 else
5419 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
Webb Scales25163bd2015-04-23 09:32:00 -05005420 abort, reply_queue);
Stephen Cameron8be986c2015-04-23 09:34:06 -05005421 }
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005422 return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
Webb Scales25163bd2015-04-23 09:32:00 -05005423}
5424
5425/* Find out which reply queue a command was meant to return on */
5426static int hpsa_extract_reply_queue(struct ctlr_info *h,
5427 struct CommandList *c)
5428{
5429 if (c->cmd_type == CMD_IOACCEL2)
5430 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5431 return c->Header.ReplyQueue;
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005432}
5433
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005434/*
5435 * Limit concurrency of abort commands to prevent
5436 * over-subscription of commands
5437 */
5438static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5439{
5440#define ABORT_CMD_WAIT_MSECS 5000
5441 return !wait_event_timeout(h->abort_cmd_wait_queue,
5442 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5443 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5444}
5445
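/*
 * Semantics of the helper above: a return of 0 means an abort slot was
 * claimed (via atomic_dec_if_positive()); nonzero means the 5000 ms
 * wait expired with no slot free. A successful caller must later do
 * atomic_inc(&h->abort_cmds_available) and wake up the wait queue.
 */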
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005446/* Send an abort for the specified command.
5447 * If the device and controller support it,
5448 * send a task abort request.
5449 */
5450static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5451{
5452
Webb Scalesa58e7e52015-04-23 09:34:16 -05005453 int rc;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005454 struct ctlr_info *h;
5455 struct hpsa_scsi_dev_t *dev;
5456 struct CommandList *abort; /* pointer to command to be aborted */
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005457 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
5458 char msg[256]; /* For debug messaging. */
5459 int ml = 0;
Don Brace2b08b3e2015-01-23 16:41:09 -06005460 __le32 tagupper, taglower;
Webb Scales25163bd2015-04-23 09:32:00 -05005461 int refcount, reply_queue;
5462
5463 if (sc == NULL)
5464 return FAILED;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005465
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005466 if (sc->device == NULL)
5467 return FAILED;
5468
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005469 /* Find the controller of the command to be aborted */
5470 h = sdev_to_hba(sc->device);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005471 if (h == NULL)
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005472 return FAILED;
5473
Webb Scales25163bd2015-04-23 09:32:00 -05005474 /* Find the device of the command to be aborted */
5475 dev = sc->device->hostdata;
5476 if (!dev) {
5477		dev_err(&h->pdev->dev, "%s: FAILED, device lookup failed\n",
5478			__func__);
Don Bracee3458932015-01-23 16:44:24 -06005479 return FAILED;
Webb Scales25163bd2015-04-23 09:32:00 -05005480 }
5481
5482 /* If controller locked up, we can guarantee command won't complete */
5483 if (lockup_detected(h)) {
5484 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5485 "ABORT FAILED, lockup detected");
5486 return FAILED;
5487 }
5488
5489 /* This is a good time to check if controller lockup has occurred */
5490 if (detect_controller_lockup(h)) {
5491 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5492 "ABORT FAILED, new lockup detected");
5493 return FAILED;
5494 }
Don Bracee3458932015-01-23 16:44:24 -06005495
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005496 /* Check that controller supports some kind of task abort */
5497 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5498 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5499 return FAILED;
5500
5501 memset(msg, 0, sizeof(msg));
Robert Elliott4b761552015-04-23 09:33:54 -05005502 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005503 h->scsi_host->host_no, sc->device->channel,
Webb Scales0d96ef52015-04-23 09:31:55 -05005504 sc->device->id, sc->device->lun,
Robert Elliott4b761552015-04-23 09:33:54 -05005505 "Aborting command", sc);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005506
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005507 /* Get SCSI command to be aborted */
5508 abort = (struct CommandList *) sc->host_scribble;
5509 if (abort == NULL) {
Webb Scales281a7fd2015-01-23 16:43:35 -06005510 /* This can happen if the command already completed. */
5511 return SUCCESS;
5512 }
5513 refcount = atomic_inc_return(&abort->refcount);
5514 if (refcount == 1) { /* Command is done already. */
5515 cmd_free(h, abort);
5516 return SUCCESS;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005517 }
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005518
5519 /* Don't bother trying the abort if we know it won't work. */
5520 if (abort->cmd_type != CMD_IOACCEL2 &&
5521 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5522 cmd_free(h, abort);
5523 return FAILED;
5524 }
5525
Webb Scalesa58e7e52015-04-23 09:34:16 -05005526 /*
5527 * Check that we're aborting the right command.
5528 * It's possible the CommandList already completed and got re-used.
5529 */
5530 if (abort->scsi_cmd != sc) {
5531 cmd_free(h, abort);
5532 return SUCCESS;
5533 }
5534
5535 abort->abort_pending = true;
Scott Teel17eb87d2014-02-18 13:55:28 -06005536 hpsa_get_tag(h, abort, &taglower, &tagupper);
Webb Scales25163bd2015-04-23 09:32:00 -05005537 reply_queue = hpsa_extract_reply_queue(h, abort);
Scott Teel17eb87d2014-02-18 13:55:28 -06005538 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
Stephen Cameron7fa30302015-01-23 16:44:30 -06005539 as = abort->scsi_cmd;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005540 if (as != NULL)
Robert Elliott4b761552015-04-23 09:33:54 -05005541 ml += sprintf(msg+ml,
5542 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
5543 as->cmd_len, as->cmnd[0], as->cmnd[1],
5544 as->serial_number);
5545 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
Webb Scales0d96ef52015-04-23 09:31:55 -05005546 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
Robert Elliott4b761552015-04-23 09:33:54 -05005547
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005548 /*
5549 * Command is in flight, or possibly already completed
5550 * by the firmware (but not to the scsi mid layer) but we can't
5551 * distinguish which. Send the abort down.
5552 */
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005553 if (wait_for_available_abort_cmd(h)) {
5554 dev_warn(&h->pdev->dev,
Robert Elliott4b761552015-04-23 09:33:54 -05005555 "%s FAILED, timeout waiting for an abort command to become available.\n",
5556 msg);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005557 cmd_free(h, abort);
5558 return FAILED;
5559 }
Webb Scales25163bd2015-04-23 09:32:00 -05005560 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005561 atomic_inc(&h->abort_cmds_available);
5562 wake_up_all(&h->abort_cmd_wait_queue);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005563 if (rc != 0) {
Robert Elliott4b761552015-04-23 09:33:54 -05005564 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
Webb Scales0d96ef52015-04-23 09:31:55 -05005565 hpsa_show_dev_msg(KERN_WARNING, h, dev,
Robert Elliott4b761552015-04-23 09:33:54 -05005566 "FAILED to abort command");
Webb Scales281a7fd2015-01-23 16:43:35 -06005567 cmd_free(h, abort);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005568 return FAILED;
5569 }
Robert Elliott4b761552015-04-23 09:33:54 -05005570 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
Webb Scalesd604f532015-04-23 09:35:22 -05005571 wait_event(h->event_sync_wait_queue,
Webb Scalesa58e7e52015-04-23 09:34:16 -05005572 abort->scsi_cmd != sc || lockup_detected(h));
Webb Scales281a7fd2015-01-23 16:43:35 -06005573 cmd_free(h, abort);
Webb Scalesa58e7e52015-04-23 09:34:16 -05005574 return !lockup_detected(h) ? SUCCESS : FAILED;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005575}
5576
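/*
 * Rough shape of the abort handler above, for reference: validate sc,
 * its device and hba; bail on (or newly detect) controller lockup;
 * require task-abort TMF support; look up the CommandList via
 * host_scribble and take a reference (a raced, already-completed
 * command returns SUCCESS); throttle via abort_cmds_available; send the
 * abort down via hpsa_send_abort_both_ways(); and on success wait until
 * the command is no longer bound to sc.
 */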
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005577/*
Webb Scales73153fe2015-04-23 09:35:04 -05005578 * For operations with an associated SCSI command, a command block is allocated
5579 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
5580 * block request tag as an index into a table of entries. cmd_tagged_free() is
5581 * the complement, although cmd_free() may be called instead.
5582 */
5583static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
5584 struct scsi_cmnd *scmd)
5585{
5586 int idx = hpsa_get_cmd_index(scmd);
5587 struct CommandList *c = h->cmd_pool + idx;
5588
5589 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
5590 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5591 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5592 /* The index value comes from the block layer, so if it's out of
5593 * bounds, it's probably not our bug.
5594 */
5595 BUG();
5596 }
5597
5598 atomic_inc(&c->refcount);
5599 if (unlikely(!hpsa_is_cmd_idle(c))) {
5600 /*
5601 * We expect that the SCSI layer will hand us a unique tag
5602 * value. Thus, there should never be a collision here between
5603 * two requests...because if the selected command isn't idle
5604 * then someone is going to be very disappointed.
5605 */
5606 dev_err(&h->pdev->dev,
5607 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5608 idx);
5609 if (c->scsi_cmd != NULL)
5610 scsi_print_command(c->scsi_cmd);
5611 scsi_print_command(scmd);
5612 }
5613
5614 hpsa_cmd_partial_init(h, idx, c);
5615 return c;
5616}
5617
5618static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5619{
5620 /*
5621 * Release our reference to the block. We don't need to do anything
5622 * else to free it, because it is accessed by index. (There's no point
5623 * in checking the result of the decrement, since we cannot guarantee
5624 * that there isn't a concurrent abort which is also accessing it.)
5625 */
5626 (void)atomic_dec(&c->refcount);
5627}
5628
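/*
 * Indexing note for cmd_tagged_alloc()/cmd_tagged_free() above: the
 * index is derived from the block-layer request tag (via
 * hpsa_get_cmd_index()), so valid values lie in
 * [HPSA_NRESERVED_CMDS, h->nr_cmds); the low indexes are reserved for
 * driver-internal commands from cmd_alloc().
 */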
5629/*
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005630 * For operations that cannot sleep, a command block is allocated at init,
5631 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5632 * which ones are free or in use. Lock must be held when calling this.
5633 * cmd_free() is the complement.
Robert Elliottbf43caf2015-04-23 09:33:38 -05005634 * This function never gives up, and never returns NULL. If it hangs,
5635 * another thread must call cmd_free() to free some tags.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005636 */
Webb Scales281a7fd2015-01-23 16:43:35 -06005637
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005638static struct CommandList *cmd_alloc(struct ctlr_info *h)
5639{
5640 struct CommandList *c;
Stephen Cameron360c73b2015-04-23 09:32:32 -05005641 int refcount, i;
Webb Scales73153fe2015-04-23 09:35:04 -05005642 int offset = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005643
Robert Elliott33811022015-01-23 16:43:41 -06005644 /*
5645	 * There is some *extremely* small but non-zero chance that
Stephen M. Cameron4c413122014-11-14 17:27:29 -06005646 * multiple threads could get in here, and one thread could
5647 * be scanning through the list of bits looking for a free
5648 * one, but the free ones are always behind him, and other
5649 * threads sneak in behind him and eat them before he can
5650 * get to them, so that while there is always a free one, a
5651 * very unlucky thread might be starved anyway, never able to
5652 * beat the other threads. In reality, this happens so
5653 * infrequently as to be indistinguishable from never.
Webb Scales73153fe2015-04-23 09:35:04 -05005654 *
5655 * Note that we start allocating commands before the SCSI host structure
5656 * is initialized. Since the search starts at bit zero, this
5657 * all works, since we have at least one command structure available;
5658 * however, it means that the structures with the low indexes have to be
5659 * reserved for driver-initiated requests, while requests from the block
5660 * layer will use the higher indexes.
Stephen M. Cameron4c413122014-11-14 17:27:29 -06005661 */
5662
Webb Scales281a7fd2015-01-23 16:43:35 -06005663 for (;;) {
Webb Scales73153fe2015-04-23 09:35:04 -05005664 i = find_next_zero_bit(h->cmd_pool_bits,
5665 HPSA_NRESERVED_CMDS,
5666 offset);
5667 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
Webb Scales281a7fd2015-01-23 16:43:35 -06005668 offset = 0;
5669 continue;
5670 }
5671 c = h->cmd_pool + i;
5672 refcount = atomic_inc_return(&c->refcount);
5673 if (unlikely(refcount > 1)) {
5674 cmd_free(h, c); /* already in use */
Webb Scales73153fe2015-04-23 09:35:04 -05005675 offset = (i + 1) % HPSA_NRESERVED_CMDS;
Webb Scales281a7fd2015-01-23 16:43:35 -06005676 continue;
5677 }
5678 set_bit(i & (BITS_PER_LONG - 1),
5679 h->cmd_pool_bits + (i / BITS_PER_LONG));
5680 break; /* it's ours now. */
5681 }
Stephen Cameron360c73b2015-04-23 09:32:32 -05005682 hpsa_cmd_partial_init(h, i, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005683 return c;
5684}
5685
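/*
 * Note on the loop above: only the first HPSA_NRESERVED_CMDS bits are
 * scanned, so driver-initiated commands never collide with the tagged
 * block-layer commands that occupy the higher pool indexes; the
 * refcount check catches a slot raced away between the bitmap scan and
 * the atomic_inc_return().
 */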
Webb Scales73153fe2015-04-23 09:35:04 -05005686/*
5687 * This is the complementary operation to cmd_alloc(). Note, however, in some
5688 * corner cases it may also be used to free blocks allocated by
5689 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
5690 * the clear-bit is harmless.
5691 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005692static void cmd_free(struct ctlr_info *h, struct CommandList *c)
5693{
Webb Scales281a7fd2015-01-23 16:43:35 -06005694 if (atomic_dec_and_test(&c->refcount)) {
5695 int i;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005696
Webb Scales281a7fd2015-01-23 16:43:35 -06005697 i = c - h->cmd_pool;
5698 clear_bit(i & (BITS_PER_LONG - 1),
5699 h->cmd_pool_bits + (i / BITS_PER_LONG));
5700 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005701}
5702
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005703#ifdef CONFIG_COMPAT
5704
Don Brace42a91642014-11-14 17:26:27 -06005705static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
5706 void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005707{
5708 IOCTL32_Command_struct __user *arg32 =
5709 (IOCTL32_Command_struct __user *) arg;
5710 IOCTL_Command_struct arg64;
5711 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
5712 int err;
5713 u32 cp;
5714
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06005715 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005716 err = 0;
5717 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5718 sizeof(arg64.LUN_info));
5719 err |= copy_from_user(&arg64.Request, &arg32->Request,
5720 sizeof(arg64.Request));
5721 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5722 sizeof(arg64.error_info));
5723 err |= get_user(arg64.buf_size, &arg32->buf_size);
5724 err |= get_user(cp, &arg32->buf);
5725 arg64.buf = compat_ptr(cp);
5726 err |= copy_to_user(p, &arg64, sizeof(arg64));
5727
5728 if (err)
5729 return -EFAULT;
5730
Don Brace42a91642014-11-14 17:26:27 -06005731 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005732 if (err)
5733 return err;
5734 err |= copy_in_user(&arg32->error_info, &p->error_info,
5735 sizeof(arg32->error_info));
5736 if (err)
5737 return -EFAULT;
5738 return err;
5739}
5740
5741static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
Don Brace42a91642014-11-14 17:26:27 -06005742 int cmd, void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005743{
5744 BIG_IOCTL32_Command_struct __user *arg32 =
5745 (BIG_IOCTL32_Command_struct __user *) arg;
5746 BIG_IOCTL_Command_struct arg64;
5747 BIG_IOCTL_Command_struct __user *p =
5748 compat_alloc_user_space(sizeof(arg64));
5749 int err;
5750 u32 cp;
5751
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06005752 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005753 err = 0;
5754 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5755 sizeof(arg64.LUN_info));
5756 err |= copy_from_user(&arg64.Request, &arg32->Request,
5757 sizeof(arg64.Request));
5758 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5759 sizeof(arg64.error_info));
5760 err |= get_user(arg64.buf_size, &arg32->buf_size);
5761 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
5762 err |= get_user(cp, &arg32->buf);
5763 arg64.buf = compat_ptr(cp);
5764 err |= copy_to_user(p, &arg64, sizeof(arg64));
5765
5766 if (err)
5767 return -EFAULT;
5768
Don Brace42a91642014-11-14 17:26:27 -06005769 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005770 if (err)
5771 return err;
5772 err |= copy_in_user(&arg32->error_info, &p->error_info,
5773 sizeof(arg32->error_info));
5774 if (err)
5775 return -EFAULT;
5776 return err;
5777}
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06005778
Don Brace42a91642014-11-14 17:26:27 -06005779static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06005780{
5781 switch (cmd) {
5782 case CCISS_GETPCIINFO:
5783 case CCISS_GETINTINFO:
5784 case CCISS_SETINTINFO:
5785 case CCISS_GETNODENAME:
5786 case CCISS_SETNODENAME:
5787 case CCISS_GETHEARTBEAT:
5788 case CCISS_GETBUSTYPES:
5789 case CCISS_GETFIRMVER:
5790 case CCISS_GETDRIVVER:
5791 case CCISS_REVALIDVOLS:
5792 case CCISS_DEREGDISK:
5793 case CCISS_REGNEWDISK:
5794 case CCISS_REGNEWD:
5795 case CCISS_RESCANDISK:
5796 case CCISS_GETLUNINFO:
5797 return hpsa_ioctl(dev, cmd, arg);
5798
5799 case CCISS_PASSTHRU32:
5800 return hpsa_ioctl32_passthru(dev, cmd, arg);
5801 case CCISS_BIG_PASSTHRU32:
5802 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
5803
5804 default:
5805 return -ENOIOCTLCMD;
5806 }
5807}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005808#endif
5809
5810static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
5811{
5812 struct hpsa_pci_info pciinfo;
5813
5814 if (!argp)
5815 return -EINVAL;
5816 pciinfo.domain = pci_domain_nr(h->pdev->bus);
5817 pciinfo.bus = h->pdev->bus->number;
5818 pciinfo.dev_fn = h->pdev->devfn;
5819 pciinfo.board_id = h->board_id;
5820 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
5821 return -EFAULT;
5822 return 0;
5823}
5824
5825static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
5826{
5827 DriverVer_type DriverVer;
5828 unsigned char vmaj, vmin, vsubmin;
5829 int rc;
5830
5831 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
5832 &vmaj, &vmin, &vsubmin);
5833 if (rc != 3) {
5834 dev_info(&h->pdev->dev, "driver version string '%s' "
5835 "unrecognized.", HPSA_DRIVER_VERSION);
5836 vmaj = 0;
5837 vmin = 0;
5838 vsubmin = 0;
5839 }
5840 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
5841 if (!argp)
5842 return -EINVAL;
5843 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
5844 return -EFAULT;
5845 return 0;
5846}
5847
5848static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5849{
5850 IOCTL_Command_struct iocommand;
5851 struct CommandList *c;
5852 char *buff = NULL;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005853 u64 temp64;
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005854 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005855
5856 if (!argp)
5857 return -EINVAL;
5858 if (!capable(CAP_SYS_RAWIO))
5859 return -EPERM;
5860 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
5861 return -EFAULT;
5862 if ((iocommand.buf_size < 1) &&
5863 (iocommand.Request.Type.Direction != XFER_NONE)) {
5864 return -EINVAL;
5865 }
5866 if (iocommand.buf_size > 0) {
5867 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
5868 if (buff == NULL)
Robert Elliott2dd02d72015-04-23 09:33:43 -05005869 return -ENOMEM;
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05005870 if (iocommand.Request.Type.Direction & XFER_WRITE) {
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005871 /* Copy the data into the buffer we created */
5872 if (copy_from_user(buff, iocommand.buf,
5873 iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005874 rc = -EFAULT;
5875 goto out_kfree;
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005876 }
5877 } else {
5878 memset(buff, 0, iocommand.buf_size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005879 }
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005880 }
Stephen Cameron45fcb862015-01-23 16:43:04 -06005881 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05005882
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005883 /* Fill in the command type */
5884 c->cmd_type = CMD_IOCTL_PEND;
Webb Scalesa58e7e52015-04-23 09:34:16 -05005885 c->scsi_cmd = SCSI_CMD_BUSY;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005886 /* Fill in Command Header */
5887 c->Header.ReplyQueue = 0; /* unused in simple mode */
5888 if (iocommand.buf_size > 0) { /* buffer to fill */
5889 c->Header.SGList = 1;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005890 c->Header.SGTotal = cpu_to_le16(1);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005891 } else { /* no buffers to fill */
5892 c->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005893 c->Header.SGTotal = cpu_to_le16(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005894 }
5895 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005896
5897 /* Fill in Request block */
5898 memcpy(&c->Request, &iocommand.Request,
5899 sizeof(c->Request));
5900
5901 /* Fill in the scatter gather information */
5902 if (iocommand.buf_size > 0) {
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005903 temp64 = pci_map_single(h->pdev, buff,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005904 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005905 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
5906 c->SG[0].Addr = cpu_to_le64(0);
5907 c->SG[0].Len = cpu_to_le32(0);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06005908 rc = -ENOMEM;
5909 goto out;
5910 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005911 c->SG[0].Addr = cpu_to_le64(temp64);
5912 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
5913 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005914 }
Webb Scales25163bd2015-04-23 09:32:00 -05005915 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
Stephen M. Cameronc2dd32e2011-06-03 09:57:29 -05005916 if (iocommand.buf_size > 0)
5917 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005918 check_ioctl_unit_attention(h, c);
Webb Scales25163bd2015-04-23 09:32:00 -05005919 if (rc) {
5920 rc = -EIO;
5921 goto out;
5922 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005923
5924 /* Copy the error information out */
5925 memcpy(&iocommand.error_info, c->err_info,
5926 sizeof(iocommand.error_info));
5927 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005928 rc = -EFAULT;
5929 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005930 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05005931 if ((iocommand.Request.Type.Direction & XFER_READ) &&
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005932 iocommand.buf_size > 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005933 /* Copy the data out of the buffer we created */
5934 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005935 rc = -EFAULT;
5936 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005937 }
5938 }
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005939out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06005940 cmd_free(h, c);
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005941out_kfree:
5942 kfree(buff);
5943 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005944}
5945
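/*
 * Flow of hpsa_passthru_ioctl() above, for reference: copy the request
 * header (and any write data) in from userspace, build a CMD_IOCTL_PEND
 * command with at most one SG entry, issue it synchronously on the
 * default reply queue, then copy the error info (and any read data)
 * back out. The big-passthru variant below is the multi-SG analogue.
 */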
5946static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5947{
5948 BIG_IOCTL_Command_struct *ioc;
5949 struct CommandList *c;
5950 unsigned char **buff = NULL;
5951 int *buff_size = NULL;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005952 u64 temp64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005953 BYTE sg_used = 0;
5954 int status = 0;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06005955 u32 left;
5956 u32 sz;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005957 BYTE __user *data_ptr;
5958
5959 if (!argp)
5960 return -EINVAL;
5961 if (!capable(CAP_SYS_RAWIO))
5962 return -EPERM;
5963 ioc = (BIG_IOCTL_Command_struct *)
5964 kmalloc(sizeof(*ioc), GFP_KERNEL);
5965 if (!ioc) {
5966 status = -ENOMEM;
5967 goto cleanup1;
5968 }
5969 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5970 status = -EFAULT;
5971 goto cleanup1;
5972 }
5973 if ((ioc->buf_size < 1) &&
5974 (ioc->Request.Type.Direction != XFER_NONE)) {
5975 status = -EINVAL;
5976 goto cleanup1;
5977 }
5978 /* Check kmalloc limits using all SGs */
5979 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5980 status = -EINVAL;
5981 goto cleanup1;
5982 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06005983 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005984 status = -EINVAL;
5985 goto cleanup1;
5986 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06005987 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005988 if (!buff) {
5989 status = -ENOMEM;
5990 goto cleanup1;
5991 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06005992 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005993 if (!buff_size) {
5994 status = -ENOMEM;
5995 goto cleanup1;
5996 }
5997 left = ioc->buf_size;
5998 data_ptr = ioc->buf;
5999 while (left) {
6000 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6001 buff_size[sg_used] = sz;
6002 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6003 if (buff[sg_used] == NULL) {
6004 status = -ENOMEM;
6005 goto cleanup1;
6006 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05006007 if (ioc->Request.Type.Direction & XFER_WRITE) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006008 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
Stephen M. Cameron0758f4f2014-07-03 10:18:03 -05006009 status = -EFAULT;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006010 goto cleanup1;
6011 }
6012 } else
6013 memset(buff[sg_used], 0, sz);
6014 left -= sz;
6015 data_ptr += sz;
6016 sg_used++;
6017 }
Stephen Cameron45fcb862015-01-23 16:43:04 -06006018 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05006019
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006020 c->cmd_type = CMD_IOCTL_PEND;
Webb Scalesa58e7e52015-04-23 09:34:16 -05006021 c->scsi_cmd = SCSI_CMD_BUSY;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006022 c->Header.ReplyQueue = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006023 c->Header.SGList = (u8) sg_used;
6024 c->Header.SGTotal = cpu_to_le16(sg_used);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006025 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006026 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6027 if (ioc->buf_size > 0) {
6028 int i;
6029 for (i = 0; i < sg_used; i++) {
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006030 temp64 = pci_map_single(h->pdev, buff[i],
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006031 buff_size[i], PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006032 if (dma_mapping_error(&h->pdev->dev,
6033 (dma_addr_t) temp64)) {
6034 c->SG[i].Addr = cpu_to_le64(0);
6035 c->SG[i].Len = cpu_to_le32(0);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06006036 hpsa_pci_unmap(h->pdev, c, i,
6037 PCI_DMA_BIDIRECTIONAL);
6038 status = -ENOMEM;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05006039 goto cleanup0;
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06006040 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006041 c->SG[i].Addr = cpu_to_le64(temp64);
6042 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6043 c->SG[i].Ext = cpu_to_le32(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006044 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006045 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006046 }
Webb Scales25163bd2015-04-23 09:32:00 -05006047 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06006048 if (sg_used)
6049 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006050 check_ioctl_unit_attention(h, c);
Webb Scales25163bd2015-04-23 09:32:00 -05006051 if (status) {
6052 status = -EIO;
6053 goto cleanup0;
6054 }
6055
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006056 /* Copy the error information out */
6057 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6058 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006059 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05006060 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006061 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05006062 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
Don Brace2b08b3e2015-01-23 16:41:09 -06006063 int i;
6064
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006065 /* Copy the data out of the buffer we created */
6066 BYTE __user *ptr = ioc->buf;
6067 for (i = 0; i < sg_used; i++) {
6068 if (copy_to_user(ptr, buff[i], buff_size[i])) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006069 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05006070 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006071 }
6072 ptr += buff_size[i];
6073 }
6074 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006075 status = 0;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05006076cleanup0:
Stephen Cameron45fcb862015-01-23 16:43:04 -06006077 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006078cleanup1:
6079 if (buff) {
Don Brace2b08b3e2015-01-23 16:41:09 -06006080 int i;
6081
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006082 for (i = 0; i < sg_used; i++)
6083 kfree(buff[i]);
6084 kfree(buff);
6085 }
6086 kfree(buff_size);
6087 kfree(ioc);
6088 return status;
6089}
6090
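/*
 * Chunking in hpsa_big_passthru_ioctl() above: the user buffer is split
 * into at most SG_ENTRIES_IN_CMD kernel buffers of ioc->malloc_size
 * bytes each. Illustrative arithmetic: buf_size = 20 KB with
 * malloc_size = 8 KB produces three chunks of 8 KB, 8 KB and 4 KB.
 */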
6091static void check_ioctl_unit_attention(struct ctlr_info *h,
6092 struct CommandList *c)
6093{
6094 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6095 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6096 (void) check_for_unit_attention(h, c);
6097}
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006098
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006099/*
6100 * ioctl
6101 */
Don Brace42a91642014-11-14 17:26:27 -06006102static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006103{
6104 struct ctlr_info *h;
6105 void __user *argp = (void __user *)arg;
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006106 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006107
6108 h = sdev_to_hba(dev);
6109
6110 switch (cmd) {
6111 case CCISS_DEREGDISK:
6112 case CCISS_REGNEWDISK:
6113 case CCISS_REGNEWD:
Stephen M. Camerona08a8472010-02-04 08:43:16 -06006114 hpsa_scan_start(h->scsi_host);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006115 return 0;
6116 case CCISS_GETPCIINFO:
6117 return hpsa_getpciinfo_ioctl(h, argp);
6118 case CCISS_GETDRIVVER:
6119 return hpsa_getdrivver_ioctl(h, argp);
6120 case CCISS_PASSTHRU:
Don Brace34f0c622015-01-23 16:43:46 -06006121 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006122 return -EAGAIN;
6123 rc = hpsa_passthru_ioctl(h, argp);
Don Brace34f0c622015-01-23 16:43:46 -06006124 atomic_inc(&h->passthru_cmds_avail);
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006125 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006126 case CCISS_BIG_PASSTHRU:
Don Brace34f0c622015-01-23 16:43:46 -06006127 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006128 return -EAGAIN;
6129 rc = hpsa_big_passthru_ioctl(h, argp);
Don Brace34f0c622015-01-23 16:43:46 -06006130 atomic_inc(&h->passthru_cmds_avail);
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006131 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006132 default:
6133 return -ENOTTY;
6134 }
6135}
6136
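/*
 * Throttling note for hpsa_ioctl() above: the two passthru paths are
 * gated by h->passthru_cmds_avail; atomic_dec_if_positive() going
 * negative means every slot is in use, and the caller sees -EAGAIN
 * rather than queueing. The count is restored after the handler returns.
 */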
Robert Elliottbf43caf2015-04-23 09:33:38 -05006137static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006138 u8 reset_type)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006139{
6140 struct CommandList *c;
6141
6142 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05006143
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006144 /* fill_cmd can't fail here, no data buffer to map */
6145 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006146 RAID_CTLR_LUNID, TYPE_MSG);
6147 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6148 c->waiting = NULL;
6149 enqueue_cmd_and_start_io(h, c);
6150 /* Don't wait for completion, the reset won't complete. Don't free
6151 * the command either. This is the last command we will send before
6152 * re-initializing everything, so it doesn't matter and won't leak.
6153 */
Robert Elliottbf43caf2015-04-23 09:33:38 -05006154 return;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006155}
6156
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006157static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06006158 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006159 int cmd_type)
6160{
6161 int pci_dir = XFER_NONE;
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05006162 u64 tag; /* for commands to be aborted */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006163
6164 c->cmd_type = CMD_IOCTL_PEND;
Webb Scalesa58e7e52015-04-23 09:34:16 -05006165 c->scsi_cmd = SCSI_CMD_BUSY;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006166 c->Header.ReplyQueue = 0;
6167 if (buff != NULL && size > 0) {
6168 c->Header.SGList = 1;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006169 c->Header.SGTotal = cpu_to_le16(1);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006170 } else {
6171 c->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006172 c->Header.SGTotal = cpu_to_le16(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006173 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006174 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6175
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006176 if (cmd_type == TYPE_CMD) {
6177 switch (cmd) {
6178 case HPSA_INQUIRY:
6179 /* are we trying to read a vital product page */
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06006180 if (page_code & VPD_PAGE) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006181 c->Request.CDB[1] = 0x01;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06006182 c->Request.CDB[2] = (page_code & 0xff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006183 }
6184 c->Request.CDBLen = 6;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006185 c->Request.type_attr_dir =
6186 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006187 c->Request.Timeout = 0;
6188 c->Request.CDB[0] = HPSA_INQUIRY;
6189 c->Request.CDB[4] = size & 0xFF;
6190 break;
6191 case HPSA_REPORT_LOG:
6192 case HPSA_REPORT_PHYS:
6193			/* Talking to the controller, so it's a physical command:
6194			   mode = 00, target = 0. Nothing to write.
6195 */
6196 c->Request.CDBLen = 12;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006197 c->Request.type_attr_dir =
6198 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006199 c->Request.Timeout = 0;
6200 c->Request.CDB[0] = cmd;
6201 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6202 c->Request.CDB[7] = (size >> 16) & 0xFF;
6203 c->Request.CDB[8] = (size >> 8) & 0xFF;
6204 c->Request.CDB[9] = size & 0xFF;
6205 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006206 case HPSA_CACHE_FLUSH:
6207 c->Request.CDBLen = 12;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006208 c->Request.type_attr_dir =
6209 TYPE_ATTR_DIR(cmd_type,
6210 ATTR_SIMPLE, XFER_WRITE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006211 c->Request.Timeout = 0;
6212 c->Request.CDB[0] = BMIC_WRITE;
6213 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
Stephen M. Cameronbb158ea2011-10-26 16:21:17 -05006214 c->Request.CDB[7] = (size >> 8) & 0xFF;
6215 c->Request.CDB[8] = size & 0xFF;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006216 break;
6217 case TEST_UNIT_READY:
6218 c->Request.CDBLen = 6;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006219 c->Request.type_attr_dir =
6220 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006221 c->Request.Timeout = 0;
6222 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006223 case HPSA_GET_RAID_MAP:
6224 c->Request.CDBLen = 12;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006225 c->Request.type_attr_dir =
6226 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006227 c->Request.Timeout = 0;
6228 c->Request.CDB[0] = HPSA_CISS_READ;
6229 c->Request.CDB[1] = cmd;
6230 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6231 c->Request.CDB[7] = (size >> 16) & 0xFF;
6232 c->Request.CDB[8] = (size >> 8) & 0xFF;
6233 c->Request.CDB[9] = size & 0xFF;
6234 break;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06006235 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6236 c->Request.CDBLen = 10;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006237 c->Request.type_attr_dir =
6238 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameron316b2212014-02-21 16:25:15 -06006239 c->Request.Timeout = 0;
6240 c->Request.CDB[0] = BMIC_READ;
6241 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6242 c->Request.CDB[7] = (size >> 16) & 0xFF;
6243 c->Request.CDB[8] = (size >> 8) & 0xFF;
6244 break;
Don Brace03383732015-01-23 16:43:30 -06006245 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6246 c->Request.CDBLen = 10;
6247 c->Request.type_attr_dir =
6248 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6249 c->Request.Timeout = 0;
6250 c->Request.CDB[0] = BMIC_READ;
6251 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6252 c->Request.CDB[7] = (size >> 16) & 0xFF;
6253 c->Request.CDB[8] = (size >> 8) & 0XFF;
6254 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006255 default:
6256			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6257 BUG();
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006258 return -1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006259 }
6260 } else if (cmd_type == TYPE_MSG) {
6261 switch (cmd) {
6262
6263 case HPSA_DEVICE_RESET_MSG:
6264 c->Request.CDBLen = 16;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006265 c->Request.type_attr_dir =
6266 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006267 c->Request.Timeout = 0; /* Don't time out */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006268 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6269 c->Request.CDB[0] = cmd;
Stephen M. Cameron21e89af2012-07-26 11:34:10 -05006270 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006271 /* If bytes 4-7 are zero, it means reset the */
6272 /* LunID device */
6273 c->Request.CDB[4] = 0x00;
6274 c->Request.CDB[5] = 0x00;
6275 c->Request.CDB[6] = 0x00;
6276 c->Request.CDB[7] = 0x00;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006277 break;
6278 case HPSA_ABORT_MSG:
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05006279 memcpy(&tag, buff, sizeof(tag));
Don Brace2b08b3e2015-01-23 16:41:09 -06006280 dev_dbg(&h->pdev->dev,
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05006281 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6282 tag, c->Header.tag);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006283 c->Request.CDBLen = 16;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006284 c->Request.type_attr_dir =
6285 TYPE_ATTR_DIR(cmd_type,
6286 ATTR_SIMPLE, XFER_WRITE);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006287 c->Request.Timeout = 0; /* Don't time out */
6288 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6289 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6290 c->Request.CDB[2] = 0x00; /* reserved */
6291 c->Request.CDB[3] = 0x00; /* reserved */
6292 /* Tag to abort goes in CDB[4]-CDB[11] */
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05006293 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006294 c->Request.CDB[12] = 0x00; /* reserved */
6295 c->Request.CDB[13] = 0x00; /* reserved */
6296 c->Request.CDB[14] = 0x00; /* reserved */
6297 c->Request.CDB[15] = 0x00; /* reserved */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006298 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006299 default:
6300 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6301 cmd);
6302 BUG();
6303 }
6304 } else {
6305 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6306 BUG();
6307 }
6308
Stephen M. Camerona505b862014-11-14 17:27:04 -06006309 switch (GET_DIR(c->Request.type_attr_dir)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006310 case XFER_READ:
6311 pci_dir = PCI_DMA_FROMDEVICE;
6312 break;
6313 case XFER_WRITE:
6314 pci_dir = PCI_DMA_TODEVICE;
6315 break;
6316 case XFER_NONE:
6317 pci_dir = PCI_DMA_NONE;
6318 break;
6319 default:
6320 pci_dir = PCI_DMA_BIDIRECTIONAL;
6321 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006322 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6323 return -1;
6324 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006325}
6326
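/*
 * Contract of fill_cmd() above: returns 0 on success and -1 if the data
 * buffer cannot be mapped (an unknown command or type hits BUG() first).
 * The direction encoded in type_attr_dir selects the PCI DMA direction
 * handed to hpsa_map_one().
 */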
6327/*
6328 * Map (physical) PCI mem into (virtual) kernel space
6329 */
6330static void __iomem *remap_pci_mem(ulong base, ulong size)
6331{
6332 ulong page_base = ((ulong) base) & PAGE_MASK;
6333 ulong page_offs = ((ulong) base) - page_base;
Stephen M. Cameron088ba34c2012-07-26 11:34:23 -05006334 void __iomem *page_remapped = ioremap_nocache(page_base,
6335 page_offs + size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006336
6337 return page_remapped ? (page_remapped + page_offs) : NULL;
6338}
6339
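/*
 * Worked example for remap_pci_mem() above (assuming 4 KiB pages):
 * base = 0xfe001234, size = 0x100 gives page_base = 0xfe001000 and
 * page_offs = 0x234, so 0x334 bytes are ioremapped and the returned
 * pointer is page_remapped + 0x234.
 */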
Matt Gates254f7962012-05-01 11:43:06 -05006340static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006341{
Matt Gates254f7962012-05-01 11:43:06 -05006342 return h->access.command_completed(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006343}
6344
Stephen M. Cameron900c5442010-02-04 08:42:35 -06006345static inline bool interrupt_pending(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006346{
6347 return h->access.intr_pending(h);
6348}
6349
6350static inline long interrupt_not_for_us(struct ctlr_info *h)
6351{
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006352 return (h->access.intr_pending(h) == 0) ||
6353 (h->interrupts_enabled == 0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006354}
6355
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06006356static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6357 u32 raw_tag)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006358{
6359 if (unlikely(tag_index >= h->nr_cmds)) {
6360 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6361 return 1;
6362 }
6363 return 0;
6364}
6365
Stephen M. Cameron5a3d16f2012-05-01 11:42:46 -05006366static inline void finish_cmd(struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006367{
Stephen M. Camerone85c5972012-05-01 11:43:42 -05006368 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
Scott Teelc3497752014-02-18 13:56:34 -06006369 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6370 || c->cmd_type == CMD_IOACCEL2))
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05006371 complete_scsi_command(c);
Stephen Cameron8be986c2015-04-23 09:34:06 -05006372 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006373 complete(c->waiting);
Stephen M. Camerona104c992010-02-04 08:42:24 -06006374}
6375
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006376
6377static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
Stephen M. Camerona104c992010-02-04 08:42:24 -06006378{
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006379#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
6380#define HPSA_SIMPLE_ERROR_BITS 0x03
Stephen M. Cameron960a30e72011-02-15 15:33:03 -06006381 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006382 return tag & ~HPSA_SIMPLE_ERROR_BITS;
6383 return tag & ~HPSA_PERF_ERROR_BITS;
Stephen M. Camerona104c992010-02-04 08:42:24 -06006384}
6385
Don Brace303932f2010-02-04 08:42:40 -06006386/* process completion of an indexed ("direct lookup") command */
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05006387static inline void process_indexed_cmd(struct ctlr_info *h,
Don Brace303932f2010-02-04 08:42:40 -06006388 u32 raw_tag)
6389{
6390 u32 tag_index;
6391 struct CommandList *c;
6392
Don Bracef2405db2015-01-23 16:43:09 -06006393 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05006394 if (!bad_tag(h, tag_index, raw_tag)) {
6395 c = h->cmd_pool + tag_index;
6396 finish_cmd(c);
6397 }
Don Brace303932f2010-02-04 08:42:40 -06006398}
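/*
 * Tag layout sketch (matching the helpers above): in performant mode
 * the low DIRECT_LOOKUP_SHIFT bits of a completed tag carry
 * error/status flags and the upper bits index h->cmd_pool:
 *
 *	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;	// which command
 *	flags     = raw_tag & HPSA_PERF_ERROR_BITS;	// discarded above
 *
 * Simple mode reserves only the two low HPSA_SIMPLE_ERROR_BITS (0x03).
 */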
6399
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006400/* Some controllers, like the P400, will give us one interrupt
6401 * after a soft reset, even if we turned interrupts off.
6402 * We only need to check for this in the hpsa_xxx_discard_completions
6403 * functions.
6404 */
6405static int ignore_bogus_interrupt(struct ctlr_info *h)
6406{
6407 if (likely(!reset_devices))
6408 return 0;
6409
6410 if (likely(h->interrupts_enabled))
6411 return 0;
6412
6413	dev_info(&h->pdev->dev,
6414		"Received interrupt while interrupts disabled (known firmware bug); ignoring.\n");
6415
6416 return 1;
6417}
6418
Matt Gates254f7962012-05-01 11:43:06 -05006419/*
6420 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6421 * Relies on (h->q[x] == x) being true for x such that
6422 * 0 <= x < MAX_REPLY_QUEUES.
6423 */
6424static struct ctlr_info *queue_to_hba(u8 *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006425{
Matt Gates254f7962012-05-01 11:43:06 -05006426 return container_of((queue - *queue), struct ctlr_info, q[0]);
6427}
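/*
 * Sketch of the identity used above: h->q[] is initialized so that
 * h->q[i] == i (see hpsa_request_irqs), hence for queue == &h->q[3]:
 *
 *	queue - *queue == &h->q[3] - 3 == &h->q[0]
 *
 * and container_of(&h->q[0], struct ctlr_info, q[0]) recovers h.
 */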
6428
6429static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6430{
6431 struct ctlr_info *h = queue_to_hba(queue);
6432 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006433 u32 raw_tag;
6434
6435 if (ignore_bogus_interrupt(h))
6436 return IRQ_NONE;
6437
6438 if (interrupt_not_for_us(h))
6439 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006440 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006441 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05006442 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006443 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05006444 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006445 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006446 return IRQ_HANDLED;
6447}
6448
Matt Gates254f7962012-05-01 11:43:06 -05006449static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006450{
Matt Gates254f7962012-05-01 11:43:06 -05006451 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006452 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05006453 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006454
6455 if (ignore_bogus_interrupt(h))
6456 return IRQ_NONE;
6457
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006458 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05006459 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006460 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05006461 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006462 return IRQ_HANDLED;
6463}
6464
Matt Gates254f7962012-05-01 11:43:06 -05006465static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006466{
Matt Gates254f7962012-05-01 11:43:06 -05006467 struct ctlr_info *h = queue_to_hba((u8 *) queue);
Don Brace303932f2010-02-04 08:42:40 -06006468 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05006469 u8 q = *(u8 *) queue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006470
6471 if (interrupt_not_for_us(h))
6472 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006473 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006474 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05006475 raw_tag = get_next_completion(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006476 while (raw_tag != FIFO_EMPTY) {
Don Bracef2405db2015-01-23 16:43:09 -06006477 process_indexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05006478 raw_tag = next_command(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006479 }
6480 }
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006481 return IRQ_HANDLED;
6482}
6483
Matt Gates254f7962012-05-01 11:43:06 -05006484static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006485{
Matt Gates254f7962012-05-01 11:43:06 -05006486 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006487 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05006488 u8 q = *(u8 *) queue;
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006489
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006490 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05006491 raw_tag = get_next_completion(h, q);
Don Brace303932f2010-02-04 08:42:40 -06006492 while (raw_tag != FIFO_EMPTY) {
Don Bracef2405db2015-01-23 16:43:09 -06006493 process_indexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05006494 raw_tag = next_command(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006495 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006496 return IRQ_HANDLED;
6497}
6498
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006499/* Send a message CDB to the firmware. Careful, this only works
6500 * in simple mode, not performant mode due to the tag lookup.
6501 * We only ever use this immediately after a controller reset.
6502 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006503static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6504 unsigned char type)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006505{
6506 struct Command {
6507 struct CommandListHeader CommandHeader;
6508 struct RequestBlock Request;
6509 struct ErrDescriptor ErrorDescriptor;
6510 };
6511 struct Command *cmd;
6512 static const size_t cmd_sz = sizeof(*cmd) +
6513 sizeof(cmd->ErrorDescriptor);
6514 dma_addr_t paddr64;
Don Brace2b08b3e2015-01-23 16:41:09 -06006515 __le32 paddr32;
6516 u32 tag;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006517 void __iomem *vaddr;
6518 int i, err;
6519
6520 vaddr = pci_ioremap_bar(pdev, 0);
6521 if (vaddr == NULL)
6522 return -ENOMEM;
6523
6524 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6525 * CCISS commands, so they must be allocated from the lower 4GiB of
6526 * memory.
6527 */
6528 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6529 if (err) {
6530 iounmap(vaddr);
Robert Elliott1eaec8f2015-01-23 16:42:37 -06006531 return err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006532 }
6533
6534 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6535 if (cmd == NULL) {
6536 iounmap(vaddr);
6537 return -ENOMEM;
6538 }
6539
6540 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6541 * although there's no guarantee, we assume that the address is at
6542 * least 4-byte aligned (most likely, it's page-aligned).
6543 */
Don Brace2b08b3e2015-01-23 16:41:09 -06006544 paddr32 = cpu_to_le32(paddr64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006545
6546 cmd->CommandHeader.ReplyQueue = 0;
6547 cmd->CommandHeader.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006548 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
Don Brace2b08b3e2015-01-23 16:41:09 -06006549 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006550 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6551
6552 cmd->Request.CDBLen = 16;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006553 cmd->Request.type_attr_dir =
6554 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006555 cmd->Request.Timeout = 0; /* Don't time out */
6556 cmd->Request.CDB[0] = opcode;
6557 cmd->Request.CDB[1] = type;
6558 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006559 cmd->ErrorDescriptor.Addr =
Don Brace2b08b3e2015-01-23 16:41:09 -06006560 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006561 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006562
Don Brace2b08b3e2015-01-23 16:41:09 -06006563 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006564
6565 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6566 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
Don Brace2b08b3e2015-01-23 16:41:09 -06006567 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006568 break;
6569 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6570 }
6571
6572 iounmap(vaddr);
6573
6574 /* we leak the DMA buffer here ... no choice since the controller could
6575 * still complete the command.
6576 */
6577 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6578 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6579 opcode, type);
6580 return -ETIMEDOUT;
6581 }
6582
6583 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6584
6585 if (tag & HPSA_ERROR_BIT) {
6586 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6587 opcode, type);
6588 return -EIO;
6589 }
6590
6591 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6592 opcode, type);
6593 return 0;
6594}
6595
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006596#define hpsa_noop(p) hpsa_message(p, 3, 0)
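/*
 * Usage sketch: hpsa_message() only works in simple mode, immediately
 * after a controller reset.  The kdump path below probes the board with
 * the no-op wrapper:
 *
 *	if (hpsa_noop(pdev) == 0)
 *		break;		// controller is responding again
 */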
6597
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006598static int hpsa_controller_hard_reset(struct pci_dev *pdev,
Don Brace42a91642014-11-14 17:26:27 -06006599 void __iomem *vaddr, u32 use_doorbell)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006600{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006601
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006602 if (use_doorbell) {
6603 /* For everything after the P600, the PCI power state method
6604 * of resetting the controller doesn't work, so we have this
6605 * other way using the doorbell register.
6606 */
6607 dev_info(&pdev->dev, "using doorbell to reset controller\n");
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006608 writel(use_doorbell, vaddr + SA5_DOORBELL);
Stephen M. Cameron85009232013-09-23 13:33:36 -05006609
Justin Lindley00701a92014-05-29 10:52:47 -05006610 /* PMC hardware guys tell us we need a 10 second delay after
Stephen M. Cameron85009232013-09-23 13:33:36 -05006611 * doorbell reset and before any attempt to talk to the board
6612 * at all to ensure that this actually works and doesn't fall
6613 * over in some weird corner cases.
6614 */
Justin Lindley00701a92014-05-29 10:52:47 -05006615 msleep(10000);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006616 } else { /* Try to do it the PCI power state way */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006617
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006618 /* Quoting from the Open CISS Specification: "The Power
6619 * Management Control/Status Register (CSR) controls the power
6620 * state of the device. The normal operating state is D0,
6621 * CSR=00h. The software off state is D3, CSR=03h. To reset
6622 * the controller, place the interface device in D3 then to D0,
6623 * this causes a secondary PCI reset which will reset the
6624 * controller." */
6625
Don Brace2662cab2015-01-23 16:41:25 -06006626 int rc = 0;
6627
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006628 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
Don Brace2662cab2015-01-23 16:41:25 -06006629
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006630 /* enter the D3hot power management state */
Don Brace2662cab2015-01-23 16:41:25 -06006631 rc = pci_set_power_state(pdev, PCI_D3hot);
6632 if (rc)
6633 return rc;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006634
6635 msleep(500);
6636
6637 /* enter the D0 power management state */
Don Brace2662cab2015-01-23 16:41:25 -06006638 rc = pci_set_power_state(pdev, PCI_D0);
6639 if (rc)
6640 return rc;
Mike Millerc4853ef2011-10-21 08:19:43 +02006641
6642 /*
6643 * The P600 requires a small delay when changing states.
6644 * Otherwise we may think the board did not reset and we bail.
6645 * This for kdump only and is particular to the P600.
6646 */
6647 msleep(500);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006648 }
6649 return 0;
6650}
6651
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006652static void init_driver_version(char *driver_version, int len)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006653{
6654 memset(driver_version, 0, len);
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06006655 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006656}
6657
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006658static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006659{
6660 char *driver_version;
6661 int i, size = sizeof(cfgtable->driver_version);
6662
6663 driver_version = kmalloc(size, GFP_KERNEL);
6664 if (!driver_version)
6665 return -ENOMEM;
6666
6667 init_driver_version(driver_version, size);
6668 for (i = 0; i < size; i++)
6669 writeb(driver_version[i], &cfgtable->driver_version[i]);
6670 kfree(driver_version);
6671 return 0;
6672}
6673
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006674static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6675 unsigned char *driver_ver)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006676{
6677 int i;
6678
6679 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6680 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6681}
6682
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006683static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006684{
6685
6686 char *driver_ver, *old_driver_ver;
6687 int rc, size = sizeof(cfgtable->driver_version);
6688
6689 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6690 if (!old_driver_ver)
6691 return -ENOMEM;
6692 driver_ver = old_driver_ver + size;
6693
6694 /* After a reset, the 32 bytes of "driver version" in the cfgtable
6695 * should have been changed, otherwise we know the reset failed.
6696 */
6697 init_driver_version(old_driver_ver, size);
6698 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6699 rc = !memcmp(driver_ver, old_driver_ver, size);
6700 kfree(old_driver_ver);
6701 return rc;
6702}
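/*
 * Sketch of the scheme above: before resetting, the caller scribbles
 * "hpsa <version>" into cfgtable->driver_version; a successful reset
 * should wipe it when the firmware repopulates the config table.
 *
 *	write_driver_ver_to_cfgtable(cfgtable);	// pre-reset scribble
 *	// ... hard reset ...
 *	if (controller_reset_failed(cfgtable))	// our string survived?
 *		// reset did not take; fall back to soft reset
 */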
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006703/* This does a hard reset of the controller using PCI power management
6704 * states or the doorbell register.
6705 */
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02006706static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006707{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006708 u64 cfg_offset;
6709 u32 cfg_base_addr;
6710 u64 cfg_base_addr_index;
6711 void __iomem *vaddr;
6712 unsigned long paddr;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006713 u32 misc_fw_support;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006714 int rc;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006715 struct CfgTable __iomem *cfgtable;
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006716 u32 use_doorbell;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006717 u16 command_register;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006718
6719 /* For controllers as old as the P600, this is very nearly
6720 * the same thing as
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006721 *
6722 * pci_save_state(pci_dev);
6723 * pci_set_power_state(pci_dev, PCI_D3hot);
6724 * pci_set_power_state(pci_dev, PCI_D0);
6725 * pci_restore_state(pci_dev);
6726 *
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006727 * For controllers newer than the P600, the pci power state
6728 * method of resetting doesn't work so we have another way
6729 * using the doorbell register.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006730 */
Stephen M. Cameron18867652010-06-16 13:51:45 -05006731
Robert Elliott60f923b2015-01-23 16:42:06 -06006732 if (!ctlr_is_resettable(board_id)) {
6733 dev_warn(&pdev->dev, "Controller not resettable\n");
Stephen M. Cameron25c1e56a2011-01-06 14:48:18 -06006734 return -ENODEV;
6735 }
Stephen M. Cameron46380782011-05-03 15:00:01 -05006736
6737 /* if controller is soft- but not hard resettable... */
6738 if (!ctlr_is_hard_resettable(board_id))
6739 return -ENOTSUPP; /* try soft reset later. */
Stephen M. Cameron18867652010-06-16 13:51:45 -05006740
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006741 /* Save the PCI command register */
6742 pci_read_config_word(pdev, 4, &command_register);
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006743 pci_save_state(pdev);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006744
6745 /* find the first memory BAR, so we can find the cfg table */
6746 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6747 if (rc)
6748 return rc;
6749 vaddr = remap_pci_mem(paddr, 0x250);
6750 if (!vaddr)
6751 return -ENOMEM;
6752
6753 /* find cfgtable in order to check if reset via doorbell is supported */
6754 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6755 &cfg_base_addr_index, &cfg_offset);
6756 if (rc)
6757 goto unmap_vaddr;
6758 cfgtable = remap_pci_mem(pci_resource_start(pdev,
6759 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6760 if (!cfgtable) {
6761 rc = -ENOMEM;
6762 goto unmap_vaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006763 }
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006764 rc = write_driver_ver_to_cfgtable(cfgtable);
6765 if (rc)
Tomas Henzl03741d92015-01-23 16:41:14 -06006766 goto unmap_cfgtable;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006767
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006768 /* If reset via doorbell register is supported, use that.
6769 * There are two such methods. Favor the newest method.
6770 */
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006771 misc_fw_support = readl(&cfgtable->misc_fw_support);
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006772 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6773 if (use_doorbell) {
6774 use_doorbell = DOORBELL_CTLR_RESET2;
6775 } else {
6776 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6777 if (use_doorbell) {
Stephen Cameron050f7142015-01-23 16:42:22 -06006778 dev_warn(&pdev->dev,
6779 "Soft reset not supported. Firmware update is required.\n");
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006780 rc = -ENOTSUPP; /* try soft reset */
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006781 goto unmap_cfgtable;
6782 }
6783 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006784
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006785 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6786 if (rc)
6787 goto unmap_cfgtable;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006788
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006789 pci_restore_state(pdev);
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006790 pci_write_config_word(pdev, 4, command_register);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006791
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006792	/* Some devices (notably the HP Smart Array 5i Controller)
6793	 * need a little pause here */
6794 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6795
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006796 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6797 if (rc) {
6798 dev_warn(&pdev->dev,
Stephen Cameron050f7142015-01-23 16:42:22 -06006799 "Failed waiting for board to become ready after hard reset\n");
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006800 goto unmap_cfgtable;
6801 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006802
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006803 rc = controller_reset_failed(vaddr);
6804 if (rc < 0)
6805 goto unmap_cfgtable;
6806 if (rc) {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006807		dev_warn(&pdev->dev,
6808			"Unable to successfully reset controller. Will try soft reset.\n");
6809 rc = -ENOTSUPP;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006810 } else {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006811 dev_info(&pdev->dev, "board ready after hard reset.\n");
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006812 }
6813
6814unmap_cfgtable:
6815 iounmap(cfgtable);
6816
6817unmap_vaddr:
6818 iounmap(vaddr);
6819 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006820}
6821
6822/*
6823 * We cannot read the structure directly; for portability we must use
6824 * the io functions.
6825 * This is for debug only.
6826 */
Don Brace42a91642014-11-14 17:26:27 -06006827static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006828{
Stephen M. Cameron58f86652010-05-27 15:13:58 -05006829#ifdef HPSA_DEBUG
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006830 int i;
6831 char temp_name[17];
6832
6833 dev_info(dev, "Controller Configuration information\n");
6834 dev_info(dev, "------------------------------------\n");
6835 for (i = 0; i < 4; i++)
6836 temp_name[i] = readb(&(tb->Signature[i]));
6837 temp_name[4] = '\0';
6838 dev_info(dev, " Signature = %s\n", temp_name);
6839 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6840 dev_info(dev, " Transport methods supported = 0x%x\n",
6841 readl(&(tb->TransportSupport)));
6842 dev_info(dev, " Transport methods active = 0x%x\n",
6843 readl(&(tb->TransportActive)));
6844 dev_info(dev, " Requested transport Method = 0x%x\n",
6845 readl(&(tb->HostWrite.TransportRequest)));
6846 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6847 readl(&(tb->HostWrite.CoalIntDelay)));
6848 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6849 readl(&(tb->HostWrite.CoalIntCount)));
Robert Elliott69d6e332015-01-23 16:41:56 -06006850 dev_info(dev, " Max outstanding commands = %d\n",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006851 readl(&(tb->CmdsOutMax)));
6852 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6853 for (i = 0; i < 16; i++)
6854 temp_name[i] = readb(&(tb->ServerName[i]));
6855 temp_name[16] = '\0';
6856 dev_info(dev, " Server Name = %s\n", temp_name);
6857 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6858 readl(&(tb->HeartBeat)));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006859#endif /* HPSA_DEBUG */
Stephen M. Cameron58f86652010-05-27 15:13:58 -05006860}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006861
6862static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6863{
6864 int i, offset, mem_type, bar_type;
6865
6866 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6867 return 0;
6868 offset = 0;
6869 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6870 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6871 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6872 offset += 4;
6873 else {
6874 mem_type = pci_resource_flags(pdev, i) &
6875 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6876 switch (mem_type) {
6877 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6878 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6879 offset += 4; /* 32 bit */
6880 break;
6881 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6882 offset += 8;
6883 break;
6884 default: /* reserved in PCI 2.2 */
6885 dev_warn(&pdev->dev,
6886 "base address is invalid\n");
6887 return -1;
6889 }
6890 }
6891 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6892 return i + 1;
6893 }
6894 return -1;
6895}
6896
Robert Elliottcc64c812015-04-23 09:33:12 -05006897static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
6898{
6899 if (h->msix_vector) {
6900 if (h->pdev->msix_enabled)
6901 pci_disable_msix(h->pdev);
Robert Elliott105a3db2015-04-23 09:33:48 -05006902 h->msix_vector = 0;
Robert Elliottcc64c812015-04-23 09:33:12 -05006903 } else if (h->msi_vector) {
6904 if (h->pdev->msi_enabled)
6905 pci_disable_msi(h->pdev);
Robert Elliott105a3db2015-04-23 09:33:48 -05006906 h->msi_vector = 0;
Robert Elliottcc64c812015-04-23 09:33:12 -05006907 }
6908}
6909
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006910/* If MSI/MSI-X is supported by the kernel we will try to enable it on
Stephen Cameron050f7142015-01-23 16:42:22 -06006911 * controllers that are capable. If not, we use legacy INTx mode.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006912 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006913static void hpsa_interrupt_mode(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006914{
6915#ifdef CONFIG_PCI_MSI
Matt Gates254f7962012-05-01 11:43:06 -05006916 int err, i;
6917 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6918
6919 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6920 hpsa_msix_entries[i].vector = 0;
6921 hpsa_msix_entries[i].entry = i;
6922 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006923
6924 /* Some boards advertise MSI but don't really support it */
Stephen M. Cameron6b3f4c52010-05-27 15:13:02 -05006925 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6926 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006927 goto default_int_mode;
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006928 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
Stephen Cameron050f7142015-01-23 16:42:22 -06006929 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006930 h->msix_vector = MAX_REPLY_QUEUES;
Stephen M. Cameronf89439b2014-05-29 10:53:02 -05006931 if (h->msix_vector > num_online_cpus())
6932 h->msix_vector = num_online_cpus();
Alexander Gordeev18fce3c2014-08-18 08:01:42 +02006933 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6934 1, h->msix_vector);
6935 if (err < 0) {
6936 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6937 h->msix_vector = 0;
6938 goto single_msi_mode;
6939 } else if (err < h->msix_vector) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006940			dev_warn(&h->pdev->dev,
6941				"only %d MSI-X vectors available\n", err);
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006942 }
Alexander Gordeev18fce3c2014-08-18 08:01:42 +02006943 h->msix_vector = err;
6944 for (i = 0; i < h->msix_vector; i++)
6945 h->intr[i] = hpsa_msix_entries[i].vector;
6946 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006947 }
Alexander Gordeev18fce3c2014-08-18 08:01:42 +02006948single_msi_mode:
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006949 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
Stephen Cameron050f7142015-01-23 16:42:22 -06006950 dev_info(&h->pdev->dev, "MSI capable controller\n");
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006951 if (!pci_enable_msi(h->pdev))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006952 h->msi_vector = 1;
6953 else
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006954 dev_warn(&h->pdev->dev, "MSI init failed\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006955 }
6956default_int_mode:
6957#endif /* CONFIG_PCI_MSI */
6958 /* if we get here we're going to use the default interrupt mode */
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006959 h->intr[h->intr_mode] = h->pdev->irq;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006960}
6961
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006962static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006963{
6964 int i;
6965 u32 subsystem_vendor_id, subsystem_device_id;
6966
6967 subsystem_vendor_id = pdev->subsystem_vendor;
6968 subsystem_device_id = pdev->subsystem_device;
6969 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6970 subsystem_vendor_id;
6971
6972 for (i = 0; i < ARRAY_SIZE(products); i++)
6973 if (*board_id == products[i].board_id)
6974 return i;
6975
Stephen M. Cameron6798cc02010-06-16 13:51:20 -05006976 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6977 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6978 !hpsa_allow_any) {
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006979		dev_warn(&pdev->dev,
6980			"unrecognized board ID: 0x%08x, ignoring.\n", *board_id);
6981 return -ENODEV;
6982 }
6983 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6984}
6985
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006986static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6987 unsigned long *memory_bar)
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006988{
6989 int i;
6990
6991 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006992 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006993 /* addressing mode bits already removed */
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006994 *memory_bar = pci_resource_start(pdev, i);
6995 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006996 *memory_bar);
6997 return 0;
6998 }
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006999 dev_warn(&pdev->dev, "no memory BAR found\n");
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05007000 return -ENODEV;
7001}
7002
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007003static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7004 int wait_for_ready)
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007005{
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007006 int i, iterations;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007007 u32 scratchpad;
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007008 if (wait_for_ready)
7009 iterations = HPSA_BOARD_READY_ITERATIONS;
7010 else
7011 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007012
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007013 for (i = 0; i < iterations; i++) {
7014 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7015 if (wait_for_ready) {
7016 if (scratchpad == HPSA_FIRMWARE_READY)
7017 return 0;
7018 } else {
7019 if (scratchpad != HPSA_FIRMWARE_READY)
7020 return 0;
7021 }
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007022 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7023 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007024 dev_warn(&pdev->dev, "board not ready, timed out.\n");
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007025 return -ENODEV;
7026}
7027
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007028static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7029 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7030 u64 *cfg_offset)
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007031{
7032 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7033 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7034 *cfg_base_addr &= (u32) 0x0000ffff;
7035 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7036 if (*cfg_base_addr_index == -1) {
7037 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7038 return -ENODEV;
7039 }
7040 return 0;
7041}
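/*
 * Addressing sketch for the lookup above: the controller publishes a
 * BAR selector in the low 16 bits of the SA5_CTCFG register and a byte
 * offset in SA5_CTMEM, so the config table lives at
 *
 *	pci_resource_start(pdev, cfg_base_addr_index) + cfg_offset
 *
 * which is exactly what hpsa_find_cfgtables() below remaps.
 */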
7042
Robert Elliott195f2c62015-04-23 09:33:17 -05007043static void hpsa_free_cfgtables(struct ctlr_info *h)
7044{
Robert Elliott105a3db2015-04-23 09:33:48 -05007045 if (h->transtable) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007046 iounmap(h->transtable);
Robert Elliott105a3db2015-04-23 09:33:48 -05007047 h->transtable = NULL;
7048 }
7049 if (h->cfgtable) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007050 iounmap(h->cfgtable);
Robert Elliott105a3db2015-04-23 09:33:48 -05007051 h->cfgtable = NULL;
7052 }
Robert Elliott195f2c62015-04-23 09:33:17 -05007053}
7054
7055/* Find and map CISS config table and transfer table
7056 * several items must be unmapped (freed) later
7057 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007058static int hpsa_find_cfgtables(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007059{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06007060 u64 cfg_offset;
7061 u32 cfg_base_addr;
7062 u64 cfg_base_addr_index;
Don Brace303932f2010-02-04 08:42:40 -06007063 u32 trans_offset;
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007064 int rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007065
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007066 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7067 &cfg_base_addr_index, &cfg_offset);
7068 if (rc)
7069 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007070 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007071 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
Robert Elliottcd3c81c2015-01-23 16:42:27 -06007072 if (!h->cfgtable) {
7073 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007074 return -ENOMEM;
Robert Elliottcd3c81c2015-01-23 16:42:27 -06007075 }
Stephen M. Cameron580ada32011-05-03 14:59:10 -05007076 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7077 if (rc)
7078 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007079 /* Find performant mode table. */
Stephen M. Camerona51fd472010-06-16 13:51:30 -05007080 trans_offset = readl(&h->cfgtable->TransMethodOffset);
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007081 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7082 cfg_base_addr_index)+cfg_offset+trans_offset,
7083 sizeof(*h->transtable));
Robert Elliott195f2c62015-04-23 09:33:17 -05007084 if (!h->transtable) {
7085 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7086 hpsa_free_cfgtables(h);
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007087 return -ENOMEM;
Robert Elliott195f2c62015-04-23 09:33:17 -05007088 }
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007089 return 0;
7090}
7091
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007092static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05007093{
Stephen Cameron41ce4c32015-04-23 09:31:47 -05007094#define MIN_MAX_COMMANDS 16
7095 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7096
7097 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
Stephen M. Cameron72ceeae2011-01-06 14:48:13 -06007098
7099	/* Limit commands in the memory-limited kdump scenario. */
7100 if (reset_devices && h->max_commands > 32)
7101 h->max_commands = 32;
7102
Stephen Cameron41ce4c32015-04-23 09:31:47 -05007103 if (h->max_commands < MIN_MAX_COMMANDS) {
7104 dev_warn(&h->pdev->dev,
7105 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7106 h->max_commands,
7107 MIN_MAX_COMMANDS);
7108 h->max_commands = MIN_MAX_COMMANDS;
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05007109 }
7110}
7111
Webb Scalesc7ee65b2015-01-23 16:42:17 -06007112/* If the controller reports that the total max sg entries is greater than 512,
7113 * then we know that chained SG blocks work. (Original smart arrays did not
7114 * support chained SG blocks and would return zero for max sg entries.)
7115 */
7116static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7117{
7118 return h->maxsgentries > 512;
7119}
7120
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007121/* Interrogate the hardware for some limits:
7122 * max commands, max SG elements without chaining, and with chaining,
7123 * SG chain block size, etc.
7124 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007125static void hpsa_find_board_params(struct ctlr_info *h)
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007126{
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05007127 hpsa_get_max_perf_mode_cmds(h);
Stephen Cameron45fcb862015-01-23 16:43:04 -06007128 h->nr_cmds = h->max_commands;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007129 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007130 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
Webb Scalesc7ee65b2015-01-23 16:42:17 -06007131 if (hpsa_supports_chained_sg_blocks(h)) {
7132		/* Limit in-command s/g elements to 32 to save dma'able memory. */
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007133 h->max_cmd_sg_entries = 32;
Webb Scales1a63ea62014-11-14 17:26:43 -06007134 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007135 h->maxsgentries--; /* save one for chain pointer */
7136 } else {
Webb Scalesc7ee65b2015-01-23 16:42:17 -06007137 /*
7138 * Original smart arrays supported at most 31 s/g entries
7139 * embedded inline in the command (trying to use more
7140 * would lock up the controller)
7141 */
7142 h->max_cmd_sg_entries = 31;
Webb Scales1a63ea62014-11-14 17:26:43 -06007143 h->maxsgentries = 31; /* default to traditional values */
Webb Scalesc7ee65b2015-01-23 16:42:17 -06007144 h->chainsize = 0;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007145 }
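	/*
	 * Worked example (illustrative numbers): a controller reporting
	 * maxsgentries = 1024 supports chaining; we keep 32 SG entries
	 * inline in the command, size the chain block at 1024 - 32 = 992
	 * entries, and drop maxsgentries to 1023 because one inline
	 * entry is spent on the chain pointer.
	 */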
Stephen M. Cameron75167d22012-05-01 11:42:51 -05007146
7147 /* Find out what task management functions are supported and cache */
7148 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
Scott Teel0e7a7fc2014-02-18 13:55:59 -06007149 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7150 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7151 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7152 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
Stephen Cameron8be986c2015-04-23 09:34:06 -05007153 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7154 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007155}
7156
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05007157static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7158{
Akinobu Mita0fc9fd42012-04-04 22:14:59 +09007159 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
Stephen Cameron050f7142015-01-23 16:42:22 -06007160 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05007161 return false;
7162 }
7163 return true;
7164}
7165
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007166static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007167{
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007168 u32 driver_support;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007169
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007170 driver_support = readl(&(h->cfgtable->driver_support));
Arnd Bergmann0b9e7b72014-06-26 15:44:52 +02007171	/* Need to enable prefetch in the SCSI core for 6400 on x86 */
7172#ifdef CONFIG_X86
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007173 driver_support |= ENABLE_SCSI_PREFETCH;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007174#endif
Stephen M. Cameron28e13442013-12-04 17:10:21 -06007175 driver_support |= ENABLE_UNIT_ATTN;
7176 writel(driver_support, &(h->cfgtable->driver_support));
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007177}
7178
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05007179/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7180 * in a prefetch beyond physical memory.
7181 */
7182static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7183{
7184 u32 dma_prefetch;
7185
7186 if (h->board_id != 0x3225103C)
7187 return;
7188 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7189 dma_prefetch |= 0x8000;
7190 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7191}
7192
Robert Elliottc706a792015-01-23 16:45:01 -06007193static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007194{
7195 int i;
7196 u32 doorbell_value;
7197 unsigned long flags;
7198 /* wait until the clear_event_notify bit 6 is cleared by controller. */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007199 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007200 spin_lock_irqsave(&h->lock, flags);
7201 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7202 spin_unlock_irqrestore(&h->lock, flags);
7203 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
Robert Elliottc706a792015-01-23 16:45:01 -06007204 goto done;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007205 /* delay and try again */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007206 msleep(CLEAR_EVENT_WAIT_INTERVAL);
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007207 }
Robert Elliottc706a792015-01-23 16:45:01 -06007208 return -ENODEV;
7209done:
7210 return 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007211}
7212
Robert Elliottc706a792015-01-23 16:45:01 -06007213static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007214{
7215 int i;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007216 u32 doorbell_value;
7217 unsigned long flags;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007218
7219	/* under certain very rare conditions, this can take a while.
7220 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7221 * as we enter this code.)
7222 */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007223 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
Webb Scales25163bd2015-04-23 09:32:00 -05007224 if (h->remove_in_progress)
7225 goto done;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007226 spin_lock_irqsave(&h->lock, flags);
7227 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7228 spin_unlock_irqrestore(&h->lock, flags);
Dan Carpenter382be662011-02-15 15:33:13 -06007229 if (!(doorbell_value & CFGTBL_ChangeReq))
Robert Elliottc706a792015-01-23 16:45:01 -06007230 goto done;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007231 /* delay and try again */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007232 msleep(MODE_CHANGE_WAIT_INTERVAL);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007233 }
Robert Elliottc706a792015-01-23 16:45:01 -06007234 return -ENODEV;
7235done:
7236 return 0;
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007237}
7238
Robert Elliottc706a792015-01-23 16:45:01 -06007239/* return -ENODEV or other reason on error, 0 on success */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007240static int hpsa_enter_simple_mode(struct ctlr_info *h)
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007241{
7242 u32 trans_support;
7243
7244 trans_support = readl(&(h->cfgtable->TransportSupport));
7245 if (!(trans_support & SIMPLE_MODE))
7246 return -ENOTSUPP;
7247
7248 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007249
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007250 /* Update the field, and then ring the doorbell */
7251 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007252 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007253 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
Robert Elliottc706a792015-01-23 16:45:01 -06007254 if (hpsa_wait_for_mode_change_ack(h))
7255 goto error;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007256 print_cfg_table(&h->pdev->dev, h->cfgtable);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007257 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7258 goto error;
Stephen M. Cameron960a30e72011-02-15 15:33:03 -06007259 h->transMethod = CFGTBL_Trans_Simple;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007260 return 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007261error:
Stephen Cameron050f7142015-01-23 16:42:22 -06007262 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007263 return -ENODEV;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007264}
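/*
 * Doorbell handshake used above, summarized as a sketch:
 *
 *	writel(CFGTBL_Trans_Simple, &h->cfgtable->HostWrite.TransportRequest);
 *	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 *	hpsa_wait_for_mode_change_ack(h);	// poll until ChangeReq clears
 *	// then confirm CFGTBL_Trans_Simple in cfgtable->TransportActive
 *
 * The performant-mode setup elsewhere in the driver follows the same
 * request/ring/ack/verify pattern with a different transport method.
 */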
7265
Robert Elliott195f2c62015-04-23 09:33:17 -05007266/* free items allocated or mapped by hpsa_pci_init */
7267static void hpsa_free_pci_init(struct ctlr_info *h)
7268{
7269 hpsa_free_cfgtables(h); /* pci_init 4 */
7270 iounmap(h->vaddr); /* pci_init 3 */
Robert Elliott105a3db2015-04-23 09:33:48 -05007271 h->vaddr = NULL;
Robert Elliott195f2c62015-04-23 09:33:17 -05007272 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
Robert Elliott943a7022015-04-23 09:34:32 -05007273 /*
7274 * call pci_disable_device before pci_release_regions per
7275 * Documentation/PCI/pci.txt
7276 */
Robert Elliott195f2c62015-04-23 09:33:17 -05007277 pci_disable_device(h->pdev); /* pci_init 1 */
Robert Elliott943a7022015-04-23 09:34:32 -05007278 pci_release_regions(h->pdev); /* pci_init 2 */
Robert Elliott195f2c62015-04-23 09:33:17 -05007279}
7280
7281/* several items must be freed later */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007282static int hpsa_pci_init(struct ctlr_info *h)
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007283{
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007284 int prod_index, err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007285
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007286 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7287 if (prod_index < 0)
Robert Elliott60f923b2015-01-23 16:42:06 -06007288 return prod_index;
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007289 h->product_name = products[prod_index].product_name;
7290 h->access = *(products[prod_index].access);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007291
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05007292 h->needs_abort_tags_swizzled =
7293 ctlr_needs_abort_tags_swizzled(h->board_id);
7294
Matthew Garrette5a44df2011-11-11 11:14:23 -05007295 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7296 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7297
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007298 err = pci_enable_device(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007299 if (err) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007300 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
Robert Elliott943a7022015-04-23 09:34:32 -05007301 pci_disable_device(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007302 return err;
7303 }
7304
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06007305 err = pci_request_regions(h->pdev, HPSA);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007306 if (err) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007307 dev_err(&h->pdev->dev,
Robert Elliott195f2c62015-04-23 09:33:17 -05007308 "failed to obtain PCI resources\n");
Robert Elliott943a7022015-04-23 09:34:32 -05007309 pci_disable_device(h->pdev);
7310 return err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007311 }
Robert Elliott4fa604e2014-11-14 17:27:24 -06007312
7313 pci_set_master(h->pdev);
7314
Stephen M. Cameron6b3f4c52010-05-27 15:13:02 -05007315 hpsa_interrupt_mode(h);
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05007316 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05007317 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007318 goto clean2; /* intmode+region, pci */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007319 h->vaddr = remap_pci_mem(h->paddr, 0x250);
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007320 if (!h->vaddr) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007321 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007322 err = -ENOMEM;
Robert Elliott195f2c62015-04-23 09:33:17 -05007323 goto clean2; /* intmode+region, pci */
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007324 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007325 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007326 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007327 goto clean3; /* vaddr, intmode+region, pci */
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007328 err = hpsa_find_cfgtables(h);
7329 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007330 goto clean3; /* vaddr, intmode+region, pci */
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007331 hpsa_find_board_params(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007332
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05007333 if (!hpsa_CISS_signature_present(h)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007334 err = -ENODEV;
Robert Elliott195f2c62015-04-23 09:33:17 -05007335 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007336 }
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007337 hpsa_set_driver_support_bits(h);
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05007338 hpsa_p600_dma_prefetch_quirk(h);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007339 err = hpsa_enter_simple_mode(h);
7340 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007341 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007342 return 0;
7343
Robert Elliott195f2c62015-04-23 09:33:17 -05007344clean4: /* cfgtables, vaddr, intmode+region, pci */
7345 hpsa_free_cfgtables(h);
7346clean3: /* vaddr, intmode+region, pci */
7347 iounmap(h->vaddr);
Robert Elliott105a3db2015-04-23 09:33:48 -05007348 h->vaddr = NULL;
Robert Elliott195f2c62015-04-23 09:33:17 -05007349clean2: /* intmode+region, pci */
7350 hpsa_disable_interrupt_mode(h);
Robert Elliott943a7022015-04-23 09:34:32 -05007351 /*
7352 * call pci_disable_device before pci_release_regions per
7353 * Documentation/PCI/pci.txt
7354 */
Robert Elliott195f2c62015-04-23 09:33:17 -05007355 pci_disable_device(h->pdev);
Robert Elliott943a7022015-04-23 09:34:32 -05007356 pci_release_regions(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007357 return err;
7358}
7359
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007360static void hpsa_hba_inquiry(struct ctlr_info *h)
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06007361{
7362 int rc;
7363
7364#define HBA_INQUIRY_BYTE_COUNT 64
7365 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7366 if (!h->hba_inquiry_data)
7367 return;
7368 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7369 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7370 if (rc != 0) {
7371 kfree(h->hba_inquiry_data);
7372 h->hba_inquiry_data = NULL;
7373 }
7374}
7375
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007376static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007377{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007378 int rc, i;
Tomas Henzl3b747292015-01-23 16:41:20 -06007379 void __iomem *vaddr;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007380
7381 if (!reset_devices)
7382 return 0;
7383
Tomas Henzl132aa222014-08-14 16:12:39 +02007384	/* The kdump kernel is loading and we don't know what state
7385	 * the PCI interface is in.  dev->enable_cnt is zero, so we
7386	 * call enable+disable, wait a while, and then switch it on.
7387 */
7388 rc = pci_enable_device(pdev);
7389 if (rc) {
7390 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7391 return -ENODEV;
7392 }
7393 pci_disable_device(pdev);
7394 msleep(260); /* a randomly chosen number */
7395 rc = pci_enable_device(pdev);
7396 if (rc) {
7397 dev_warn(&pdev->dev, "failed to enable device.\n");
7398 return -ENODEV;
7399 }
Robert Elliott4fa604e2014-11-14 17:27:24 -06007400
Tomas Henzl859c75a2014-09-12 14:44:15 +02007401 pci_set_master(pdev);
Robert Elliott4fa604e2014-11-14 17:27:24 -06007402
Tomas Henzl3b747292015-01-23 16:41:20 -06007403 vaddr = pci_ioremap_bar(pdev, 0);
7404 if (vaddr == NULL) {
7405 rc = -ENOMEM;
7406 goto out_disable;
7407 }
7408 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7409 iounmap(vaddr);
7410
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007411 /* Reset the controller with a PCI power-cycle or via doorbell */
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007412 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007413
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007414 /* -ENOTSUPP here means we cannot reset the controller
7415 * but it's already (and still) up and running in
Stephen M. Cameron18867652010-06-16 13:51:45 -05007416 * "performant mode". Or, it might be 640x, which can't reset
7417 * due to concerns about shared bbwc between 6402/6404 pair.
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007418 */
Robert Elliottadf1b3a2015-01-23 16:42:01 -06007419 if (rc)
Tomas Henzl132aa222014-08-14 16:12:39 +02007420 goto out_disable;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007421
7422 /* Now try to get the controller to respond to a no-op */
Robert Elliott1ba66c92015-01-23 16:42:11 -06007423 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007424 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7425 if (hpsa_noop(pdev) == 0)
7426 break;
7427 else
7428 dev_warn(&pdev->dev, "no-op failed%s\n",
7429 (i < 11 ? "; re-trying" : ""));
7430 }
Tomas Henzl132aa222014-08-14 16:12:39 +02007431
7432out_disable:
7433
7434 pci_disable_device(pdev);
7435 return rc;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007436}
7437
Robert Elliott1fb7c982015-04-23 09:33:22 -05007438static void hpsa_free_cmd_pool(struct ctlr_info *h)
7439{
7440 kfree(h->cmd_pool_bits);
Robert Elliott105a3db2015-04-23 09:33:48 -05007441 h->cmd_pool_bits = NULL;
7442 if (h->cmd_pool) {
Robert Elliott1fb7c982015-04-23 09:33:22 -05007443 pci_free_consistent(h->pdev,
7444 h->nr_cmds * sizeof(struct CommandList),
7445 h->cmd_pool,
7446 h->cmd_pool_dhandle);
Robert Elliott105a3db2015-04-23 09:33:48 -05007447 h->cmd_pool = NULL;
7448 h->cmd_pool_dhandle = 0;
7449 }
7450 if (h->errinfo_pool) {
Robert Elliott1fb7c982015-04-23 09:33:22 -05007451 pci_free_consistent(h->pdev,
7452 h->nr_cmds * sizeof(struct ErrorInfo),
7453 h->errinfo_pool,
7454 h->errinfo_pool_dhandle);
Robert Elliott105a3db2015-04-23 09:33:48 -05007455 h->errinfo_pool = NULL;
7456 h->errinfo_pool_dhandle = 0;
7457 }
Robert Elliott1fb7c982015-04-23 09:33:22 -05007458}
7459
Robert Elliottd37ffbe2015-04-23 09:32:27 -05007460static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007461{
7462 h->cmd_pool_bits = kzalloc(
7463 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7464 sizeof(unsigned long), GFP_KERNEL);
7465 h->cmd_pool = pci_alloc_consistent(h->pdev,
7466 h->nr_cmds * sizeof(*h->cmd_pool),
7467 &(h->cmd_pool_dhandle));
7468 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7469 h->nr_cmds * sizeof(*h->errinfo_pool),
7470 &(h->errinfo_pool_dhandle));
7471 if ((h->cmd_pool_bits == NULL)
7472 || (h->cmd_pool == NULL)
7473 || (h->errinfo_pool == NULL)) {
7474 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
Robert Elliott2c143342015-01-23 16:42:48 -06007475 goto clean_up;
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007476 }
Stephen Cameron360c73b2015-04-23 09:32:32 -05007477 hpsa_preinitialize_commands(h);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007478 return 0;
Robert Elliott2c143342015-01-23 16:42:48 -06007479clean_up:
7480 hpsa_free_cmd_pool(h);
7481 return -ENOMEM;
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007482}
7483
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05007484static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7485{
Fabian Frederickec429952015-01-23 16:41:46 -06007486 int i, cpu;
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05007487
7488 cpu = cpumask_first(cpu_online_mask);
7489 for (i = 0; i < h->msix_vector; i++) {
Fabian Frederickec429952015-01-23 16:41:46 -06007490 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05007491 cpu = cpumask_next(cpu, cpu_online_mask);
7492 }
7493}
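/*
 * Illustrative: h->msix_vector is capped at num_online_cpus() in
 * hpsa_interrupt_mode(), so with 8 online CPUs and 4 vectors the loop
 * above hints vector i to the i-th online CPU (CPUs 0, 1, 2, 3).
 */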
7494
Robert Elliottec501a12015-01-23 16:41:40 -06007495/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7496static void hpsa_free_irqs(struct ctlr_info *h)
7497{
7498 int i;
7499
7500 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7501 /* Single reply queue, only one irq to free */
7502 i = h->intr_mode;
7503 irq_set_affinity_hint(h->intr[i], NULL);
7504 free_irq(h->intr[i], &h->q[i]);
Robert Elliott105a3db2015-04-23 09:33:48 -05007505 h->q[i] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06007506 return;
7507 }
7508
7509 for (i = 0; i < h->msix_vector; i++) {
7510 irq_set_affinity_hint(h->intr[i], NULL);
7511 free_irq(h->intr[i], &h->q[i]);
Robert Elliott105a3db2015-04-23 09:33:48 -05007512 h->q[i] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06007513 }
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007514 for (; i < MAX_REPLY_QUEUES; i++)
7515 h->q[i] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06007516}
7517
Robert Elliott9ee61792015-01-23 16:42:32 -06007518/* returns 0 on success; cleans up and returns -Enn on error */
7519static int hpsa_request_irqs(struct ctlr_info *h,
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007520 irqreturn_t (*msixhandler)(int, void *),
7521 irqreturn_t (*intxhandler)(int, void *))
7522{
Matt Gates254f7962012-05-01 11:43:06 -05007523 int rc, i;
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007524
Matt Gates254f7962012-05-01 11:43:06 -05007525 /*
7526 * initialize h->q[x] = x so that interrupt handlers know which
7527 * queue to process.
7528 */
7529 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7530 h->q[i] = (u8) i;
7531
Hannes Reineckeeee0f032014-01-15 13:30:53 +01007532 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
Matt Gates254f7962012-05-01 11:43:06 -05007533 /* If performant mode and MSI-X, use multiple reply queues */
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007534 for (i = 0; i < h->msix_vector; i++) {
Robert Elliott8b470042015-04-23 09:34:58 -05007535 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
Matt Gates254f7962012-05-01 11:43:06 -05007536 rc = request_irq(h->intr[i], msixhandler,
Robert Elliott8b470042015-04-23 09:34:58 -05007537 0, h->intrname[i],
Matt Gates254f7962012-05-01 11:43:06 -05007538 &h->q[i]);
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007539 if (rc) {
7540 int j;
7541
7542 dev_err(&h->pdev->dev,
7543 "failed to get irq %d for %s\n",
7544 h->intr[i], h->devname);
7545 for (j = 0; j < i; j++) {
7546 free_irq(h->intr[j], &h->q[j]);
7547 h->q[j] = 0;
7548 }
7549 for (; j < MAX_REPLY_QUEUES; j++)
7550 h->q[j] = 0;
7551 return rc;
7552 }
7553 }
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05007554 hpsa_irq_affinity_hints(h);
Matt Gates254f7962012-05-01 11:43:06 -05007555 } else {
7556 /* Use single reply pool */
Hannes Reineckeeee0f032014-01-15 13:30:53 +01007557 if (h->msix_vector > 0 || h->msi_vector) {
Robert Elliott8b470042015-04-23 09:34:58 -05007558 if (h->msix_vector)
7559 sprintf(h->intrname[h->intr_mode],
7560 "%s-msix", h->devname);
7561 else
7562 sprintf(h->intrname[h->intr_mode],
7563 "%s-msi", h->devname);
Matt Gates254f7962012-05-01 11:43:06 -05007564 rc = request_irq(h->intr[h->intr_mode],
Robert Elliott8b470042015-04-23 09:34:58 -05007565 msixhandler, 0,
7566 h->intrname[h->intr_mode],
Matt Gates254f7962012-05-01 11:43:06 -05007567 &h->q[h->intr_mode]);
7568 } else {
Robert Elliott8b470042015-04-23 09:34:58 -05007569 sprintf(h->intrname[h->intr_mode],
7570 "%s-intx", h->devname);
Matt Gates254f7962012-05-01 11:43:06 -05007571 rc = request_irq(h->intr[h->intr_mode],
Robert Elliott8b470042015-04-23 09:34:58 -05007572 intxhandler, IRQF_SHARED,
7573 h->intrname[h->intr_mode],
Matt Gates254f7962012-05-01 11:43:06 -05007574 &h->q[h->intr_mode]);
7575 }
Robert Elliott105a3db2015-04-23 09:33:48 -05007576 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
Matt Gates254f7962012-05-01 11:43:06 -05007577 }
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007578 if (rc) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007579 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007580 h->intr[h->intr_mode], h->devname);
Robert Elliott195f2c62015-04-23 09:33:17 -05007581 hpsa_free_irqs(h);
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007582 return -ENODEV;
7583 }
7584 return 0;
7585}
7586
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007587static int hpsa_kdump_soft_reset(struct ctlr_info *h)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007588{
Robert Elliott39c53f52015-04-23 09:35:14 -05007589 int rc;

Robert Elliottbf43caf2015-04-23 09:33:38 -05007590	hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007591
7592 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
Robert Elliott39c53f52015-04-23 09:35:14 -05007593 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7594 if (rc) {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007595 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
Robert Elliott39c53f52015-04-23 09:35:14 -05007596 return rc;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007597 }
7598
7599 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
Robert Elliott39c53f52015-04-23 09:35:14 -05007600 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7601 if (rc) {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007602		dev_warn(&h->pdev->dev,
7603			"Board failed to become ready after soft reset.\n");
Robert Elliott39c53f52015-04-23 09:35:14 -05007604 return rc;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007605 }
7606
7607 return 0;
7608}
7609
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007610static void hpsa_free_reply_queues(struct ctlr_info *h)
7611{
7612 int i;
7613
7614 for (i = 0; i < h->nreply_queues; i++) {
7615 if (!h->reply_queue[i].head)
7616 continue;
Robert Elliott1fb7c982015-04-23 09:33:22 -05007617 pci_free_consistent(h->pdev,
7618 h->reply_queue_size,
7619 h->reply_queue[i].head,
7620 h->reply_queue[i].busaddr);
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007621 h->reply_queue[i].head = NULL;
7622 h->reply_queue[i].busaddr = 0;
7623 }
Robert Elliott105a3db2015-04-23 09:33:48 -05007624 h->reply_queue_size = 0;
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007625}
7626
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05007627static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7628{
Robert Elliott105a3db2015-04-23 09:33:48 -05007629 hpsa_free_performant_mode(h); /* init_one 7 */
7630 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
7631 hpsa_free_cmd_pool(h); /* init_one 5 */
7632 hpsa_free_irqs(h); /* init_one 4 */
Robert Elliott2946e822015-04-23 09:35:09 -05007633 scsi_host_put(h->scsi_host); /* init_one 3 */
7634 h->scsi_host = NULL; /* init_one 3 */
7635 hpsa_free_pci_init(h); /* init_one 2_5 */
Robert Elliott9ecd9532015-04-23 09:34:43 -05007636 free_percpu(h->lockup_detected); /* init_one 2 */
7637 h->lockup_detected = NULL; /* init_one 2 */
7638 if (h->resubmit_wq) {
7639 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
7640 h->resubmit_wq = NULL;
7641 }
7642 if (h->rescan_ctlr_wq) {
7643 destroy_workqueue(h->rescan_ctlr_wq);
7644 h->rescan_ctlr_wq = NULL;
7645 }
Robert Elliott105a3db2015-04-23 09:33:48 -05007646 kfree(h); /* init_one 1 */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007647}
7648
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007649/* Called when controller lockup detected. */
Don Bracef2405db2015-01-23 16:43:09 -06007650static void fail_all_outstanding_cmds(struct ctlr_info *h)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007651{
Webb Scales281a7fd2015-01-23 16:43:35 -06007652 int i, refcount;
7653 struct CommandList *c;
Webb Scales25163bd2015-04-23 09:32:00 -05007654 int failcount = 0;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007655
Don Brace080ef1c2015-01-23 16:43:25 -06007656 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
Don Bracef2405db2015-01-23 16:43:09 -06007657 for (i = 0; i < h->nr_cmds; i++) {
Don Bracef2405db2015-01-23 16:43:09 -06007658 c = h->cmd_pool + i;
Webb Scales281a7fd2015-01-23 16:43:35 -06007659 refcount = atomic_inc_return(&c->refcount);
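		/* A refcount > 1 means the slot holds a live command. */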
7660 if (refcount > 1) {
Webb Scales25163bd2015-04-23 09:32:00 -05007661 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
Webb Scales281a7fd2015-01-23 16:43:35 -06007662 finish_cmd(c);
Stephen Cameron433b5f42015-04-23 09:32:11 -05007663 atomic_dec(&h->commands_outstanding);
Webb Scales25163bd2015-04-23 09:32:00 -05007664 failcount++;
Webb Scales281a7fd2015-01-23 16:43:35 -06007665 }
7666 cmd_free(h, c);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007667 }
Webb Scales25163bd2015-04-23 09:32:00 -05007668 dev_warn(&h->pdev->dev,
7669 "failed %d commands in fail_all\n", failcount);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007670}
7671
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007672static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7673{
Rusty Russellc8ed0012015-03-05 10:49:19 +10307674 int cpu;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007675
Rusty Russellc8ed0012015-03-05 10:49:19 +10307676 for_each_online_cpu(cpu) {
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007677 u32 *lockup_detected;
7678 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7679 *lockup_detected = value;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007680 }
7681 wmb(); /* be sure the per-cpu variables are out to memory */
7682}
7683
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007684static void controller_lockup_detected(struct ctlr_info *h)
7685{
7686 unsigned long flags;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007687 u32 lockup_detected;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007688
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007689 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7690 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007691 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7692 if (!lockup_detected) {
7693 /* no heartbeat, but controller gave us a zero. */
7694 dev_warn(&h->pdev->dev,
Webb Scales25163bd2015-04-23 09:32:00 -05007695			"lockup detected after %d seconds but scratchpad register is zero\n",
7696 h->heartbeat_sample_interval / HZ);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007697 lockup_detected = 0xffffffff;
7698 }
7699 set_lockup_detected_for_all_cpus(h, lockup_detected);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007700 spin_unlock_irqrestore(&h->lock, flags);
Webb Scales25163bd2015-04-23 09:32:00 -05007701	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
7702 lockup_detected, h->heartbeat_sample_interval / HZ);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007703 pci_disable_device(h->pdev);
Don Bracef2405db2015-01-23 16:43:09 -06007704 fail_all_outstanding_cmds(h);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007705}
7706
Webb Scales25163bd2015-04-23 09:32:00 -05007707static int detect_controller_lockup(struct ctlr_info *h)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007708{
7709 u64 now;
7710 u32 heartbeat;
7711 unsigned long flags;
7712
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007713 now = get_jiffies_64();
7714 /* If we've received an interrupt recently, we're ok. */
7715 if (time_after64(h->last_intr_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05007716 (h->heartbeat_sample_interval), now))
Webb Scales25163bd2015-04-23 09:32:00 -05007717 return false;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007718
7719 /*
7720 * If we've already checked the heartbeat recently, we're ok.
7721 * This could happen if someone sends us a signal. We
7722 * otherwise don't care about signals in this thread.
7723 */
7724 if (time_after64(h->last_heartbeat_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05007725 (h->heartbeat_sample_interval), now))
Webb Scales25163bd2015-04-23 09:32:00 -05007726 return false;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007727
7728 /* If heartbeat has not changed since we last looked, we're not ok. */
7729 spin_lock_irqsave(&h->lock, flags);
7730 heartbeat = readl(&h->cfgtable->HeartBeat);
7731 spin_unlock_irqrestore(&h->lock, flags);
7732 if (h->last_heartbeat == heartbeat) {
7733 controller_lockup_detected(h);
Webb Scales25163bd2015-04-23 09:32:00 -05007734 return true;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007735 }
7736
7737 /* We're ok. */
7738 h->last_heartbeat = heartbeat;
7739 h->last_heartbeat_timestamp = now;
Webb Scales25163bd2015-04-23 09:32:00 -05007740 return false;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007741}
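/*
 * Illustrative timeline (the interval value here is an example, not
 * necessarily the driver default): with heartbeat_sample_interval =
 * 30 * HZ, a controller whose HeartBeat register reads the same value
 * in two samples taken more than 30 seconds apart is declared locked up.
 */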
7742
Stephen M. Cameron98465902014-02-21 16:25:00 -06007743static void hpsa_ack_ctlr_events(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007744{
7745 int i;
7746 char *event_type;
7747
Stephen Camerone4aa3e62015-01-23 16:44:07 -06007748 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7749 return;
7750
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007751 /* Ask the controller to clear the events we're handling. */
Stephen M. Cameron1f7cee82014-02-18 13:56:09 -06007752 if ((h->transMethod & (CFGTBL_Trans_io_accel1
7753 | CFGTBL_Trans_io_accel2)) &&
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007754 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7755 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7756
7757 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7758 event_type = "state change";
7759 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7760 event_type = "configuration change";
7761 /* Stop sending new RAID offload reqs via the IO accelerator */
7762 scsi_block_requests(h->scsi_host);
7763 for (i = 0; i < h->ndevices; i++)
7764 h->dev[i]->offload_enabled = 0;
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007765 hpsa_drain_accel_commands(h);
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007766		/* Acknowledge the accel path events we have just handled */
7767 dev_warn(&h->pdev->dev,
7768 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7769 h->events, event_type);
7770 writel(h->events, &(h->cfgtable->clear_event_notify));
7771 /* Set the "clear event notify field update" bit 6 */
7772 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7773 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7774 hpsa_wait_for_clear_event_notify_ack(h);
7775 scsi_unblock_requests(h->scsi_host);
7776 } else {
7777 /* Acknowledge controller notification events. */
7778 writel(h->events, &(h->cfgtable->clear_event_notify));
7779 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7780 hpsa_wait_for_clear_event_notify_ack(h);
7781#if 0
7782 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7783 hpsa_wait_for_mode_change_ack(h);
7784#endif
7785 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06007786 return;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007787}
7788
7789/* Check a register on the controller to see if there are configuration
7790 * changes (added/changed/removed logical drives, etc.) which mean that
Scott Teele863d682014-02-18 13:57:05 -06007791 * we should rescan the controller for devices.
7792 * Also check flag for driver-initiated rescan.
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007793 */
Stephen M. Cameron98465902014-02-21 16:25:00 -06007794static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007795{
7796 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
Stephen M. Cameron98465902014-02-21 16:25:00 -06007797 return 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007798
7799 h->events = readl(&(h->cfgtable->event_notify));
Stephen M. Cameron98465902014-02-21 16:25:00 -06007800 return h->events & RESCAN_REQUIRED_EVENT_BITS;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007801}
7802
Stephen M. Cameron98465902014-02-21 16:25:00 -06007803/*
7804 * Check if any of the offline devices have become ready
7805 */
7806static int hpsa_offline_devices_ready(struct ctlr_info *h)
7807{
7808 unsigned long flags;
7809 struct offline_device_entry *d;
7810 struct list_head *this, *tmp;
7811
7812 spin_lock_irqsave(&h->offline_device_lock, flags);
7813 list_for_each_safe(this, tmp, &h->offline_device_list) {
7814 d = list_entry(this, struct offline_device_entry,
7815 offline_list);
7816 spin_unlock_irqrestore(&h->offline_device_lock, flags);
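		/*
		 * The lock is dropped here because hpsa_volume_offline()
		 * issues commands and may sleep; it is retaken before the
		 * list is touched again.
		 */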
Stephen M. Camerond1fea472014-07-03 10:17:58 -05007817 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7818 spin_lock_irqsave(&h->offline_device_lock, flags);
7819 list_del(&d->offline_list);
7820 spin_unlock_irqrestore(&h->offline_device_lock, flags);
Stephen M. Cameron98465902014-02-21 16:25:00 -06007821 return 1;
Stephen M. Camerond1fea472014-07-03 10:17:58 -05007822 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06007823 spin_lock_irqsave(&h->offline_device_lock, flags);
7824 }
7825 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7826 return 0;
7827}
7828
Don Brace6636e7f2015-01-23 16:45:17 -06007829static void hpsa_rescan_ctlr_worker(struct work_struct *work)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007830{
7831 unsigned long flags;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007832 struct ctlr_info *h = container_of(to_delayed_work(work),
Don Brace6636e7f2015-01-23 16:45:17 -06007833 struct ctlr_info, rescan_ctlr_work);
7834
7836 if (h->remove_in_progress)
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007837 return;
Stephen M. Cameron98465902014-02-21 16:25:00 -06007838
7839 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7840 scsi_host_get(h->scsi_host);
Stephen M. Cameron98465902014-02-21 16:25:00 -06007841 hpsa_ack_ctlr_events(h);
7842 hpsa_scan_start(h->scsi_host);
7843 scsi_host_put(h->scsi_host);
7844 }
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007845 spin_lock_irqsave(&h->lock, flags);
Don Brace6636e7f2015-01-23 16:45:17 -06007846 if (!h->remove_in_progress)
7847 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007848 h->heartbeat_sample_interval);
7849 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007850}
7851
Don Brace6636e7f2015-01-23 16:45:17 -06007852static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7853{
7854 unsigned long flags;
7855 struct ctlr_info *h = container_of(to_delayed_work(work),
7856 struct ctlr_info, monitor_ctlr_work);
7857
7858 detect_controller_lockup(h);
7859 if (lockup_detected(h))
7860 return;
7861
7862 spin_lock_irqsave(&h->lock, flags);
7863 if (!h->remove_in_progress)
7864 schedule_delayed_work(&h->monitor_ctlr_work,
7865 h->heartbeat_sample_interval);
7866 spin_unlock_irqrestore(&h->lock, flags);
7867}
7868
7869static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7870 char *name)
7871{
7872 struct workqueue_struct *wq = NULL;
Don Brace6636e7f2015-01-23 16:45:17 -06007873
Don Brace397ea9c2015-02-06 17:44:15 -06007874 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
Don Brace6636e7f2015-01-23 16:45:17 -06007875 if (!wq)
7876 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
7877
7878 return wq;
7879}
7880
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007881static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007882{
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007883 int dac, rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007884 struct ctlr_info *h;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007885 int try_soft_reset = 0;
7886 unsigned long flags;
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007887 u32 board_id;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007888
7889 if (number_of_controllers == 0)
7890 printk(KERN_INFO DRIVER_NAME "\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007891
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007892 rc = hpsa_lookup_board_id(pdev, &board_id);
7893 if (rc < 0) {
7894 dev_warn(&pdev->dev, "Board ID not found\n");
7895 return rc;
7896 }
7897
7898 rc = hpsa_init_reset_devices(pdev, board_id);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007899 if (rc) {
7900 if (rc != -ENOTSUPP)
7901 return rc;
7902 /* If the reset fails in a particular way (it has no way to do
7903 * a proper hard reset, so returns -ENOTSUPP) we can try to do
7904 * a soft reset once we get the controller configured up to the
7905 * point that it can accept a command.
7906 */
7907 try_soft_reset = 1;
7908 rc = 0;
7909 }
7910
7911reinit_after_soft_reset:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007912
Don Brace303932f2010-02-04 08:42:40 -06007913 /* Command structures must be aligned on a 32-byte boundary because
7914	 * the 5 lower bits of the address are used by the hardware and by
7915 * the driver. See comments in hpsa.h for more info.
7916 */
Don Brace303932f2010-02-04 08:42:40 -06007917 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007918 h = kzalloc(sizeof(*h), GFP_KERNEL);
Robert Elliott105a3db2015-04-23 09:33:48 -05007919 if (!h) {
7920 dev_err(&pdev->dev, "Failed to allocate controller head\n");
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007921 return -ENOMEM;
Robert Elliott105a3db2015-04-23 09:33:48 -05007922 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007923
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007924 h->pdev = pdev;
Robert Elliott105a3db2015-04-23 09:33:48 -05007925
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06007926 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
Stephen M. Cameron98465902014-02-21 16:25:00 -06007927 INIT_LIST_HEAD(&h->offline_device_list);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007928 spin_lock_init(&h->lock);
Stephen M. Cameron98465902014-02-21 16:25:00 -06007929 spin_lock_init(&h->offline_device_lock);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007930 spin_lock_init(&h->scan_lock);
Don Brace34f0c622015-01-23 16:43:46 -06007931 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05007932 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007933
7934 /* Allocate and clear per-cpu variable lockup_detected */
7935 h->lockup_detected = alloc_percpu(u32);
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05007936 if (!h->lockup_detected) {
Robert Elliott105a3db2015-04-23 09:33:48 -05007937 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05007938 rc = -ENOMEM;
Robert Elliott2efa5922015-04-23 09:34:53 -05007939 goto clean1; /* aer/h */
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05007940 }
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007941 set_lockup_detected_for_all_cpus(h, 0);
7942
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007943 rc = hpsa_pci_init(h);
Robert Elliott105a3db2015-04-23 09:33:48 -05007944 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05007945 goto clean2; /* lu, aer/h */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007946
Robert Elliott2946e822015-04-23 09:35:09 -05007947 /* relies on h-> settings made by hpsa_pci_init, including
7948 * interrupt_mode h->intr */
7949 rc = hpsa_scsi_host_alloc(h);
7950 if (rc)
7951 goto clean2_5; /* pci, lu, aer/h */
7952
7953 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007954 h->ctlr = number_of_controllers;
7955 number_of_controllers++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007956
7957 /* configure PCI DMA stuff */
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007958 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7959 if (rc == 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007960 dac = 1;
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007961 } else {
7962 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7963 if (rc == 0) {
7964 dac = 0;
7965 } else {
7966 dev_err(&pdev->dev, "no suitable DMA available\n");
Robert Elliott2946e822015-04-23 09:35:09 -05007967 goto clean3; /* shost, pci, lu, aer/h */
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007968 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007969 }
7970
7971 /* make sure the board interrupts are off */
7972 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05007973
Robert Elliott105a3db2015-04-23 09:33:48 -05007974 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
7975 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05007976 goto clean3; /* shost, pci, lu, aer/h */
Robert Elliottd37ffbe2015-04-23 09:32:27 -05007977 rc = hpsa_alloc_cmd_pool(h);
Robert Elliott8947fd12015-01-23 16:42:54 -06007978 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05007979 goto clean4; /* irq, shost, pci, lu, aer/h */
Robert Elliott105a3db2015-04-23 09:33:48 -05007980 rc = hpsa_alloc_sg_chain_blocks(h);
7981 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05007982 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
Stephen M. Camerona08a8472010-02-04 08:43:16 -06007983 init_waitqueue_head(&h->scan_wait_queue);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05007984 init_waitqueue_head(&h->abort_cmd_wait_queue);
Webb Scalesd604f532015-04-23 09:35:22 -05007985 init_waitqueue_head(&h->event_sync_wait_queue);
7986 mutex_init(&h->reset_mutex);
Stephen M. Camerona08a8472010-02-04 08:43:16 -06007987 h->scan_finished = 1; /* no scan currently in progress */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007988
7989 pci_set_drvdata(pdev, h);
Stephen M. Cameron9a413382011-05-03 14:59:41 -05007990 h->ndevices = 0;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06007991 h->hba_mode_enabled = 0;
Robert Elliott2946e822015-04-23 09:35:09 -05007992
Stephen M. Cameron9a413382011-05-03 14:59:41 -05007993 spin_lock_init(&h->devlock);
Robert Elliott105a3db2015-04-23 09:33:48 -05007994 rc = hpsa_put_ctlr_into_performant_mode(h);
7995 if (rc)
Robert Elliott2946e822015-04-23 09:35:09 -05007996 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
7997
7998 /* hook into SCSI subsystem */
7999 rc = hpsa_scsi_add_host(h);
8000 if (rc)
8001 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
Robert Elliott2efa5922015-04-23 09:34:53 -05008002
8003 /* create the resubmit workqueue */
8004 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8005 if (!h->rescan_ctlr_wq) {
8006 rc = -ENOMEM;
8007 goto clean7;
8008 }
8009
8010 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8011 if (!h->resubmit_wq) {
8012 rc = -ENOMEM;
8013 goto clean7; /* aer/h */
8014 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008015
Robert Elliott105a3db2015-04-23 09:33:48 -05008016 /*
8017 * At this point, the controller is ready to take commands.
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008018 * Now, if reset_devices and the hard reset didn't work, try
8019 * the soft reset and see if that works.
8020 */
8021 if (try_soft_reset) {
8022
8023 /* This is kind of gross. We may or may not get a completion
8024 * from the soft reset command, and if we do, then the value
8025 * from the fifo may or may not be valid. So, we wait 10 secs
8026 * after the reset throwing away any completions we get during
8027 * that time. Unregister the interrupt handler and register
8028 * fake ones to scoop up any residual completions.
8029 */
8030 spin_lock_irqsave(&h->lock, flags);
8031 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8032 spin_unlock_irqrestore(&h->lock, flags);
Robert Elliottec501a12015-01-23 16:41:40 -06008033 hpsa_free_irqs(h);
Robert Elliott9ee61792015-01-23 16:42:32 -06008034 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008035 hpsa_intx_discard_completions);
8036 if (rc) {
Robert Elliott9ee61792015-01-23 16:42:32 -06008037 dev_warn(&h->pdev->dev,
8038 "Failed to request_irq after soft reset.\n");
Robert Elliottd4987572015-04-23 09:34:37 -05008039 /*
Robert Elliottb2ef4802015-04-23 09:34:48 -05008040 * cannot goto clean7 or free_irqs will be called
8041 * again. Instead, do its work
8042 */
8043 hpsa_free_performant_mode(h); /* clean7 */
8044 hpsa_free_sg_chain_blocks(h); /* clean6 */
8045 hpsa_free_cmd_pool(h); /* clean5 */
8046 /*
8047 * skip hpsa_free_irqs(h) clean4 since that
8048 * was just called before request_irqs failed
Robert Elliottd4987572015-04-23 09:34:37 -05008049 */
8050 goto clean3;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008051 }
8052
8053 rc = hpsa_kdump_soft_reset(h);
8054 if (rc)
8055 /* Neither hard nor soft reset worked, we're hosed. */
Don Brace7ef73232015-07-18 11:12:33 -05008056 goto clean7;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008057
8058 dev_info(&h->pdev->dev, "Board READY.\n");
8059 dev_info(&h->pdev->dev,
8060 "Waiting for stale completions to drain.\n");
8061 h->access.set_intr_mask(h, HPSA_INTR_ON);
8062 msleep(10000);
8063 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8064
8065 rc = controller_reset_failed(h->cfgtable);
8066 if (rc)
8067 dev_info(&h->pdev->dev,
8068 "Soft reset appears to have failed.\n");
8069
8070 /* since the controller's reset, we have to go back and re-init
8071 * everything. Easiest to just forget what we've done and do it
8072 * all over again.
8073 */
8074 hpsa_undo_allocations_after_kdump_soft_reset(h);
8075 try_soft_reset = 0;
8076 if (rc)
Robert Elliottb2ef4802015-04-23 09:34:48 -05008077 /* don't goto clean, we already unallocated */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05008078 return -ENODEV;
8079
8080 goto reinit_after_soft_reset;
8081 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008082
Robert Elliott105a3db2015-04-23 09:33:48 -05008083 /* Enable Accelerated IO path at driver layer */
8084 h->acciopath_status = 1;
Scott Teelda0697b2014-02-18 13:57:00 -06008085
Scott Teele863d682014-02-18 13:57:05 -06008086
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008087 /* Turn the interrupts on so we can service requests */
8088 h->access.set_intr_mask(h, HPSA_INTR_ON);
8089
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06008090 hpsa_hba_inquiry(h);
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008091
8092 /* Monitor the controller for firmware lockups */
8093 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8094 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8095 schedule_delayed_work(&h->monitor_ctlr_work,
8096 h->heartbeat_sample_interval);
Don Brace6636e7f2015-01-23 16:45:17 -06008097 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8098 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8099 h->heartbeat_sample_interval);
Stephen M. Cameron88bf6d62013-11-01 11:02:25 -05008100 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008101
Robert Elliott2946e822015-04-23 09:35:09 -05008102clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
Robert Elliott105a3db2015-04-23 09:33:48 -05008103 hpsa_free_performant_mode(h);
8104 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8105clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06008106 hpsa_free_sg_chain_blocks(h);
Robert Elliott2946e822015-04-23 09:35:09 -05008107clean5: /* cmd, irq, shost, pci, lu, aer/h */
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05008108 hpsa_free_cmd_pool(h);
Robert Elliott2946e822015-04-23 09:35:09 -05008109clean4: /* irq, shost, pci, lu, aer/h */
Robert Elliottec501a12015-01-23 16:41:40 -06008110 hpsa_free_irqs(h);
Robert Elliott2946e822015-04-23 09:35:09 -05008111clean3: /* shost, pci, lu, aer/h */
8112 scsi_host_put(h->scsi_host);
8113 h->scsi_host = NULL;
8114clean2_5: /* pci, lu, aer/h */
Robert Elliott195f2c62015-04-23 09:33:17 -05008115 hpsa_free_pci_init(h);
Robert Elliott2946e822015-04-23 09:35:09 -05008116clean2: /* lu, aer/h */
Robert Elliott105a3db2015-04-23 09:33:48 -05008117 if (h->lockup_detected) {
Stephen M. Cameron094963d2014-05-29 10:53:18 -05008118 free_percpu(h->lockup_detected);
Robert Elliott105a3db2015-04-23 09:33:48 -05008119 h->lockup_detected = NULL;
8120 }
8121clean1: /* wq/aer/h */
8122 if (h->resubmit_wq) {
8123 destroy_workqueue(h->resubmit_wq);
8124 h->resubmit_wq = NULL;
8125 }
8126 if (h->rescan_ctlr_wq) {
8127 destroy_workqueue(h->rescan_ctlr_wq);
8128 h->rescan_ctlr_wq = NULL;
8129 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008130 kfree(h);
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06008131 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008132}
8133
8134static void hpsa_flush_cache(struct ctlr_info *h)
8135{
8136 char *flush_buf;
8137 struct CommandList *c;
Webb Scales25163bd2015-04-23 09:32:00 -05008138 int rc;
Stephen M. Cameron702890e2013-09-23 13:33:30 -05008139
Stephen M. Cameron094963d2014-05-29 10:53:18 -05008140 if (unlikely(lockup_detected(h)))
Stephen M. Cameron702890e2013-09-23 13:33:30 -05008141 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008142 flush_buf = kzalloc(4, GFP_KERNEL);
8143 if (!flush_buf)
8144 return;
8145
Stephen Cameron45fcb862015-01-23 16:43:04 -06008146 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05008147
Stephen M. Camerona2dac132013-02-20 11:24:41 -06008148 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8149 RAID_CTLR_LUNID, TYPE_CMD)) {
8150 goto out;
8151 }
Webb Scales25163bd2015-04-23 09:32:00 -05008152 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8153 PCI_DMA_TODEVICE, NO_TIMEOUT);
8154 if (rc)
8155 goto out;
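	/*
	 * Note: the out: label below sits between the if and its statement,
	 * so the same dev_warn serves both the goto paths above and a
	 * non-zero CommandStatus.
	 */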
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008156 if (c->err_info->CommandStatus != 0)
Stephen M. Camerona2dac132013-02-20 11:24:41 -06008157out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008158 dev_warn(&h->pdev->dev,
8159 "error flushing cache on controller\n");
Stephen Cameron45fcb862015-01-23 16:43:04 -06008160 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008161 kfree(flush_buf);
8162}
8163
8164static void hpsa_shutdown(struct pci_dev *pdev)
8165{
8166 struct ctlr_info *h;
8167
8168 h = pci_get_drvdata(pdev);
8169	/* Turn board interrupts off and send the flush cache command.
8170	 * The flush writes all data in the battery-backed cache
8171	 * out to disk before the board powers down.
8172	 */
8173 hpsa_flush_cache(h);
8174 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Robert Elliott105a3db2015-04-23 09:33:48 -05008175 hpsa_free_irqs(h); /* init_one 4 */
Robert Elliottcc64c812015-04-23 09:33:12 -05008176 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008177}
8178
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008179static void hpsa_free_device_info(struct ctlr_info *h)
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06008180{
8181 int i;
8182
Robert Elliott105a3db2015-04-23 09:33:48 -05008183 for (i = 0; i < h->ndevices; i++) {
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06008184 kfree(h->dev[i]);
Robert Elliott105a3db2015-04-23 09:33:48 -05008185 h->dev[i] = NULL;
8186 }
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06008187}
8188
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008189static void hpsa_remove_one(struct pci_dev *pdev)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008190{
8191 struct ctlr_info *h;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008192 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008193
8194 if (pci_get_drvdata(pdev) == NULL) {
Stephen M. Camerona0c12412011-10-26 16:22:04 -05008195 dev_err(&pdev->dev, "unable to remove device\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008196 return;
8197 }
8198 h = pci_get_drvdata(pdev);
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008199
8200 /* Get rid of any controller monitoring work items */
8201 spin_lock_irqsave(&h->lock, flags);
8202 h->remove_in_progress = 1;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06008203 spin_unlock_irqrestore(&h->lock, flags);
Don Brace6636e7f2015-01-23 16:45:17 -06008204 cancel_delayed_work_sync(&h->monitor_ctlr_work);
8205 cancel_delayed_work_sync(&h->rescan_ctlr_work);
8206 destroy_workqueue(h->rescan_ctlr_wq);
8207 destroy_workqueue(h->resubmit_wq);
Robert Elliottcc64c812015-04-23 09:33:12 -05008208
Robert Elliott105a3db2015-04-23 09:33:48 -05008209 /* includes hpsa_free_irqs - init_one 4 */
Robert Elliott195f2c62015-04-23 09:33:17 -05008210 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008211 hpsa_shutdown(pdev);
Robert Elliottcc64c812015-04-23 09:33:12 -05008212
Robert Elliott105a3db2015-04-23 09:33:48 -05008213 hpsa_free_device_info(h); /* scan */
8214
Robert Elliott2946e822015-04-23 09:35:09 -05008215 kfree(h->hba_inquiry_data); /* init_one 10 */
8216 h->hba_inquiry_data = NULL; /* init_one 10 */
8217 if (h->scsi_host)
8218 scsi_remove_host(h->scsi_host); /* init_one 8 */
8219 hpsa_free_ioaccel2_sg_chain_blocks(h);
Robert Elliott105a3db2015-04-23 09:33:48 -05008220 hpsa_free_performant_mode(h); /* init_one 7 */
8221 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8222 hpsa_free_cmd_pool(h); /* init_one 5 */
8223
8224 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
Robert Elliott195f2c62015-04-23 09:33:17 -05008225
Robert Elliott2946e822015-04-23 09:35:09 -05008226 scsi_host_put(h->scsi_host); /* init_one 3 */
8227 h->scsi_host = NULL; /* init_one 3 */
8228
Robert Elliott195f2c62015-04-23 09:33:17 -05008229 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
Robert Elliott2946e822015-04-23 09:35:09 -05008230 hpsa_free_pci_init(h); /* init_one 2.5 */
Robert Elliott195f2c62015-04-23 09:33:17 -05008231
Robert Elliott105a3db2015-04-23 09:33:48 -05008232 free_percpu(h->lockup_detected); /* init_one 2 */
8233 h->lockup_detected = NULL; /* init_one 2 */
8234 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
8235 kfree(h); /* init_one 1 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008236}
8237
8238static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8239 __attribute__((unused)) pm_message_t state)
8240{
8241 return -ENOSYS;
8242}
8243
8244static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8245{
8246 return -ENOSYS;
8247}
8248
8249static struct pci_driver hpsa_pci_driver = {
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06008250 .name = HPSA,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008251 .probe = hpsa_init_one,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08008252 .remove = hpsa_remove_one,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08008253 .id_table = hpsa_pci_device_id, /* id_table */
8254 .shutdown = hpsa_shutdown,
8255 .suspend = hpsa_suspend,
8256 .resume = hpsa_resume,
8257};
8258
Don Brace303932f2010-02-04 08:42:40 -06008259/* Fill in bucket_map[], given nsgs (the max number of
8260 * scatter gather elements supported) and bucket[],
8261 * which is an array of 8 integers. The bucket[] array
8262 * contains 8 different DMA transfer sizes (in 16
8263 * byte increments) which the controller uses to fetch
8264 * commands. This function fills in bucket_map[], which
8265 * maps a given number of scatter gather elements to one of
8266 * the 8 DMA transfer sizes. The point of it is to allow the
8267 * controller to only do as much DMA as needed to fetch the
8268 * command, with the DMA transfer size encoded in the lower
8269 * bits of the command address.
8270 */
8271static void calc_bucket_map(int bucket[], int num_buckets,
Don Brace2b08b3e2015-01-23 16:41:09 -06008272 int nsgs, int min_blocks, u32 *bucket_map)
Don Brace303932f2010-02-04 08:42:40 -06008273{
8274 int i, j, b, size;
8275
Don Brace303932f2010-02-04 08:42:40 -06008276 /* Note, bucket_map must have nsgs+1 entries. */
8277 for (i = 0; i <= nsgs; i++) {
8278 /* Compute size of a command with i SG entries */
Matt Gatese1f7de02014-02-18 13:55:17 -06008279 size = i + min_blocks;
Don Brace303932f2010-02-04 08:42:40 -06008280 b = num_buckets; /* Assume the biggest bucket */
8281 /* Find the bucket that is just big enough */
Matt Gatese1f7de02014-02-18 13:55:17 -06008282 for (j = 0; j < num_buckets; j++) {
Don Brace303932f2010-02-04 08:42:40 -06008283 if (bucket[j] >= size) {
8284 b = j;
8285 break;
8286 }
8287 }
8288 /* for a command with i SG entries, use bucket b. */
8289 bucket_map[i] = b;
8290 }
8291}
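
/*
 * Worked example (illustrative values, not taken from the controller):
 * with bucket[] = {5, 6, 8, 10, 12, 20, 28, 36}, num_buckets = 8 and
 * min_blocks = 4, a command with i = 3 SG entries needs
 * size = 3 + 4 = 7 blocks; the smallest bucket >= 7 is 8, at index 2,
 * so bucket_map[3] = 2.
 */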
8292
Robert Elliott105a3db2015-04-23 09:33:48 -05008293/*
8294 * return -ENODEV on error, 0 on success (or no action)
8295 * allocates numerous items that must be freed later
8296 */
Robert Elliottc706a792015-01-23 16:45:01 -06008297static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
Don Brace303932f2010-02-04 08:42:40 -06008298{
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05008299 int i;
8300 unsigned long register_value;
Matt Gatese1f7de02014-02-18 13:55:17 -06008301 unsigned long transMethod = CFGTBL_Trans_Performant |
8302 (trans_support & CFGTBL_Trans_use_short_tags) |
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008303 CFGTBL_Trans_enable_directed_msix |
8304 (trans_support & (CFGTBL_Trans_io_accel1 |
8305 CFGTBL_Trans_io_accel2));
Matt Gatese1f7de02014-02-18 13:55:17 -06008306 struct access_method access = SA5_performant_access;
Stephen M. Camerondef342b2010-05-27 15:14:39 -05008307
8308 /* This is a bit complicated. There are 8 registers on
8309	 * the controller which we write to, to tell it the 8 different
8310	 * command sizes it may see. It's a way of
8311 * reducing the DMA done to fetch each command. Encoded into
8312 * each command's tag are 3 bits which communicate to the controller
8313 * which of the eight sizes that command fits within. The size of
8314 * each command depends on how many scatter gather entries there are.
8315 * Each SG entry requires 16 bytes. The eight registers are programmed
8316 * with the number of 16-byte blocks a command of that size requires.
8317 * The smallest command possible requires 5 such 16 byte blocks.
Stephen M. Camerond66ae082012-01-19 14:00:48 -06008318	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
Stephen M. Camerondef342b2010-05-27 15:14:39 -05008319 * blocks. Note, this only extends to the SG entries contained
8320 * within the command block, and does not extend to chained blocks
8321 * of SG elements. bft[] contains the eight values we write to
8322 * the registers. They are not evenly distributed, but have more
8323 * sizes for small commands, and fewer sizes for larger commands.
8324 */
Stephen M. Camerond66ae082012-01-19 14:00:48 -06008325 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008326#define MIN_IOACCEL2_BFT_ENTRY 5
8327#define HPSA_IOACCEL2_HEADER_SZ 4
8328 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
8329 13, 14, 15, 16, 17, 18, 19,
8330 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
8331 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
8332 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
8333 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
8334 16 * MIN_IOACCEL2_BFT_ENTRY);
8335 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
Stephen M. Camerond66ae082012-01-19 14:00:48 -06008336 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
Don Brace303932f2010-02-04 08:42:40 -06008337 /* 5 = 1 s/g entry or 4k
8338 * 6 = 2 s/g entry or 8k
8339 * 8 = 4 s/g entry or 16k
8340 * 10 = 6 s/g entry or 24k
8341 */
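	/*
	 * Illustrative example: a command with 10 SG entries needs
	 * 10 + 4 = 14 sixteen-byte blocks, so it falls in the bft[5] = 20
	 * bucket and the controller fetches 20 * 16 = 320 bytes; the
	 * bucket index rides in the low bits of the command address.
	 */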
Don Brace303932f2010-02-04 08:42:40 -06008342
Stephen M. Cameronb3a52e72014-05-29 10:53:23 -05008343 /* If the controller supports either ioaccel method then
8344 * we can also use the RAID stack submit path that does not
8345 * perform the superfluous readl() after each command submission.
8346 */
8347 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
8348 access = SA5_performant_access_no_read;
8349
Don Brace303932f2010-02-04 08:42:40 -06008350 /* Controller spec: zero out this buffer. */
Stephen M. Cameron072b0512014-05-29 10:53:07 -05008351 for (i = 0; i < h->nreply_queues; i++)
8352 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
Don Brace303932f2010-02-04 08:42:40 -06008353
Stephen M. Camerond66ae082012-01-19 14:00:48 -06008354 bft[7] = SG_ENTRIES_IN_CMD + 4;
8355 calc_bucket_map(bft, ARRAY_SIZE(bft),
Matt Gatese1f7de02014-02-18 13:55:17 -06008356 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
Don Brace303932f2010-02-04 08:42:40 -06008357 for (i = 0; i < 8; i++)
8358 writel(bft[i], &h->transtable->BlockFetch[i]);
8359
8360 /* size of controller ring buffer */
8361 writel(h->max_commands, &h->transtable->RepQSize);
Matt Gates254f7962012-05-01 11:43:06 -05008362 writel(h->nreply_queues, &h->transtable->RepQCount);
Don Brace303932f2010-02-04 08:42:40 -06008363 writel(0, &h->transtable->RepQCtrAddrLow32);
8364 writel(0, &h->transtable->RepQCtrAddrHigh32);
Matt Gates254f7962012-05-01 11:43:06 -05008365
8366 for (i = 0; i < h->nreply_queues; i++) {
8367 writel(0, &h->transtable->RepQAddr[i].upper);
Stephen M. Cameron072b0512014-05-29 10:53:07 -05008368 writel(h->reply_queue[i].busaddr,
Matt Gates254f7962012-05-01 11:43:06 -05008369 &h->transtable->RepQAddr[i].lower);
8370 }
8371
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008372 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Matt Gatese1f7de02014-02-18 13:55:17 -06008373 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
8374 /*
8375	 * Enable outbound interrupt coalescing in accelerator mode.
8376 */
8377 if (trans_support & CFGTBL_Trans_io_accel1) {
8378 access = SA5_ioaccel_mode1_access;
8379 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8380 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
Scott Teelc3497752014-02-18 13:56:34 -06008381 } else {
8382 if (trans_support & CFGTBL_Trans_io_accel2) {
8383 access = SA5_ioaccel_mode2_access;
8384 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8385 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
8386 }
Matt Gatese1f7de02014-02-18 13:55:17 -06008387 }
Don Brace303932f2010-02-04 08:42:40 -06008388 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
Robert Elliottc706a792015-01-23 16:45:01 -06008389 if (hpsa_wait_for_mode_change_ack(h)) {
8390 dev_err(&h->pdev->dev,
8391 "performant mode problem - doorbell timeout\n");
8392 return -ENODEV;
8393 }
Don Brace303932f2010-02-04 08:42:40 -06008394 register_value = readl(&(h->cfgtable->TransportActive));
8395 if (!(register_value & CFGTBL_Trans_Performant)) {
Stephen Cameron050f7142015-01-23 16:42:22 -06008396 dev_err(&h->pdev->dev,
8397 "performant mode problem - transport not active\n");
Robert Elliottc706a792015-01-23 16:45:01 -06008398 return -ENODEV;
Don Brace303932f2010-02-04 08:42:40 -06008399 }
Stephen M. Cameron960a30e72011-02-15 15:33:03 -06008400 /* Change the access methods to the performant access methods */
Matt Gatese1f7de02014-02-18 13:55:17 -06008401 h->access = access;
8402 h->transMethod = transMethod;
8403
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008404 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
8405 (trans_support & CFGTBL_Trans_io_accel2)))
Robert Elliottc706a792015-01-23 16:45:01 -06008406 return 0;
Matt Gatese1f7de02014-02-18 13:55:17 -06008407
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008408 if (trans_support & CFGTBL_Trans_io_accel1) {
8409 /* Set up I/O accelerator mode */
8410 for (i = 0; i < h->nreply_queues; i++) {
8411 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
8412 h->reply_queue[i].current_entry =
8413 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
8414 }
8415 bft[7] = h->ioaccel_maxsg + 8;
8416 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
8417 h->ioaccel1_blockFetchTable);
8418
8419 /* initialize all reply queue entries to unused */
Stephen M. Cameron072b0512014-05-29 10:53:07 -05008420 for (i = 0; i < h->nreply_queues; i++)
8421 memset(h->reply_queue[i].head,
8422 (u8) IOACCEL_MODE1_REPLY_UNUSED,
8423 h->reply_queue_size);
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008424
8425 /* set all the constant fields in the accelerator command
8426 * frames once at init time to save CPU cycles later.
8427 */
8428 for (i = 0; i < h->nr_cmds; i++) {
8429 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
8430
8431 cp->function = IOACCEL1_FUNCTION_SCSIIO;
8432 cp->err_info = (u32) (h->errinfo_pool_dhandle +
8433 (i * sizeof(struct ErrorInfo)));
8434 cp->err_info_len = sizeof(struct ErrorInfo);
8435 cp->sgl_offset = IOACCEL1_SGLOFFSET;
Don Brace2b08b3e2015-01-23 16:41:09 -06008436 cp->host_context_flags =
8437 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008438 cp->timeout_sec = 0;
8439 cp->ReplyQueue = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06008440 cp->tag =
Don Bracef2405db2015-01-23 16:43:09 -06008441 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06008442 cp->host_addr =
8443 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008444 (i * sizeof(struct io_accel1_cmd)));
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008445 }
8446 } else if (trans_support & CFGTBL_Trans_io_accel2) {
8447 u64 cfg_offset, cfg_base_addr_index;
8448 u32 bft2_offset, cfg_base_addr;
8449 int rc;
8450
8451 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
8452 &cfg_base_addr_index, &cfg_offset);
8453 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
8454 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
8455 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
8456 4, h->ioaccel2_blockFetchTable);
8457 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
8458 BUILD_BUG_ON(offsetof(struct CfgTable,
8459 io_accel_request_size_offset) != 0xb8);
8460 h->ioaccel2_bft2_regs =
8461 remap_pci_mem(pci_resource_start(h->pdev,
8462 cfg_base_addr_index) +
8463 cfg_offset + bft2_offset,
8464 ARRAY_SIZE(bft2) *
8465 sizeof(*h->ioaccel2_bft2_regs));
8466 for (i = 0; i < ARRAY_SIZE(bft2); i++)
8467 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
Matt Gatese1f7de02014-02-18 13:55:17 -06008468 }
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06008469 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
Robert Elliottc706a792015-01-23 16:45:01 -06008470 if (hpsa_wait_for_mode_change_ack(h)) {
8471 dev_err(&h->pdev->dev,
8472 "performant mode problem - enabling ioaccel mode\n");
8473 return -ENODEV;
8474 }
8475 return 0;
Matt Gatese1f7de02014-02-18 13:55:17 -06008476}
8477
Robert Elliott1fb7c982015-04-23 09:33:22 -05008478/* Free ioaccel1 mode command blocks and block fetch table */
8479static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8480{
Robert Elliott105a3db2015-04-23 09:33:48 -05008481 if (h->ioaccel_cmd_pool) {
Robert Elliott1fb7c982015-04-23 09:33:22 -05008482 pci_free_consistent(h->pdev,
8483 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8484 h->ioaccel_cmd_pool,
8485 h->ioaccel_cmd_pool_dhandle);
Robert Elliott105a3db2015-04-23 09:33:48 -05008486 h->ioaccel_cmd_pool = NULL;
8487 h->ioaccel_cmd_pool_dhandle = 0;
8488 }
Robert Elliott1fb7c982015-04-23 09:33:22 -05008489 kfree(h->ioaccel1_blockFetchTable);
Robert Elliott105a3db2015-04-23 09:33:48 -05008490 h->ioaccel1_blockFetchTable = NULL;
Robert Elliott1fb7c982015-04-23 09:33:22 -05008491}
8492
Robert Elliottd37ffbe2015-04-23 09:32:27 -05008493/* Allocate ioaccel1 mode command blocks and block fetch table */
8494static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
Matt Gatese1f7de02014-02-18 13:55:17 -06008495{
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06008496 h->ioaccel_maxsg =
8497 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8498 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
8499 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
8500
Matt Gatese1f7de02014-02-18 13:55:17 -06008501 /* Command structures must be aligned on a 128-byte boundary
8502 * because the 7 lower bits of the address are used by the
8503 * hardware.
8504 */
Matt Gatese1f7de02014-02-18 13:55:17 -06008505 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
8506 IOACCEL1_COMMANDLIST_ALIGNMENT);
8507 h->ioaccel_cmd_pool =
8508 pci_alloc_consistent(h->pdev,
8509 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8510 &(h->ioaccel_cmd_pool_dhandle));
8511
8512 h->ioaccel1_blockFetchTable =
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06008513 kmalloc(((h->ioaccel_maxsg + 1) *
Matt Gatese1f7de02014-02-18 13:55:17 -06008514 sizeof(u32)), GFP_KERNEL);
8515
8516 if ((h->ioaccel_cmd_pool == NULL) ||
8517 (h->ioaccel1_blockFetchTable == NULL))
8518 goto clean_up;
8519
8520 memset(h->ioaccel_cmd_pool, 0,
8521 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
8522 return 0;
8523
8524clean_up:
Robert Elliott1fb7c982015-04-23 09:33:22 -05008525 hpsa_free_ioaccel1_cmd_and_bft(h);
Robert Elliott2dd02d72015-04-23 09:33:43 -05008526 return -ENOMEM;
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05008527}
8528
Robert Elliott1fb7c982015-04-23 09:33:22 -05008529/* Free ioaccel2 mode command blocks and block fetch table */
8530static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8531{
Webb Scalesd9a729f2015-04-23 09:33:27 -05008532 hpsa_free_ioaccel2_sg_chain_blocks(h);
8533
Robert Elliott105a3db2015-04-23 09:33:48 -05008534 if (h->ioaccel2_cmd_pool) {
Robert Elliott1fb7c982015-04-23 09:33:22 -05008535 pci_free_consistent(h->pdev,
8536 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8537 h->ioaccel2_cmd_pool,
8538 h->ioaccel2_cmd_pool_dhandle);
Robert Elliott105a3db2015-04-23 09:33:48 -05008539 h->ioaccel2_cmd_pool = NULL;
8540 h->ioaccel2_cmd_pool_dhandle = 0;
8541 }
Robert Elliott1fb7c982015-04-23 09:33:22 -05008542 kfree(h->ioaccel2_blockFetchTable);
Robert Elliott105a3db2015-04-23 09:33:48 -05008543 h->ioaccel2_blockFetchTable = NULL;
Robert Elliott1fb7c982015-04-23 09:33:22 -05008544}
8545
Robert Elliottd37ffbe2015-04-23 09:32:27 -05008546/* Allocate ioaccel2 mode command blocks and block fetch table */
8547static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
Stephen M. Cameronaca90122014-02-18 13:56:14 -06008548{
Webb Scalesd9a729f2015-04-23 09:33:27 -05008549 int rc;
8550
8553 h->ioaccel_maxsg =
8554 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8555 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
8556 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
8557
Stephen M. Cameronaca90122014-02-18 13:56:14 -06008558 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
8559 IOACCEL2_COMMANDLIST_ALIGNMENT);
8560 h->ioaccel2_cmd_pool =
8561 pci_alloc_consistent(h->pdev,
8562 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8563 &(h->ioaccel2_cmd_pool_dhandle));
8564
8565 h->ioaccel2_blockFetchTable =
8566 kmalloc(((h->ioaccel_maxsg + 1) *
8567 sizeof(u32)), GFP_KERNEL);
8568
8569 if ((h->ioaccel2_cmd_pool == NULL) ||
Webb Scalesd9a729f2015-04-23 09:33:27 -05008570 (h->ioaccel2_blockFetchTable == NULL)) {
8571 rc = -ENOMEM;
8572 goto clean_up;
8573 }
8574
8575 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
8576 if (rc)
Stephen M. Cameronaca90122014-02-18 13:56:14 -06008577 goto clean_up;
8578
8579 memset(h->ioaccel2_cmd_pool, 0,
8580 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
8581 return 0;
8582
8583clean_up:
Robert Elliott1fb7c982015-04-23 09:33:22 -05008584 hpsa_free_ioaccel2_cmd_and_bft(h);
Webb Scalesd9a729f2015-04-23 09:33:27 -05008585 return rc;
Stephen M. Cameronaca90122014-02-18 13:56:14 -06008586}
8587
Robert Elliott105a3db2015-04-23 09:33:48 -05008588/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
8589static void hpsa_free_performant_mode(struct ctlr_info *h)
8590{
8591 kfree(h->blockFetchTable);
8592 h->blockFetchTable = NULL;
8593 hpsa_free_reply_queues(h);
8594 hpsa_free_ioaccel1_cmd_and_bft(h);
8595 hpsa_free_ioaccel2_cmd_and_bft(h);
8596}
8597
8598/* return -ENODEV on error, 0 on success (or no action)
8599 * allocates numerous items that must be freed later
8600 */
8601static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05008602{
8603 u32 trans_support;
Matt Gatese1f7de02014-02-18 13:55:17 -06008604 unsigned long transMethod = CFGTBL_Trans_Performant |
8605 CFGTBL_Trans_use_short_tags;
Robert Elliott105a3db2015-04-23 09:33:48 -05008606 int i, rc;
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05008607
Stephen M. Cameron02ec19c2011-01-06 14:48:29 -06008608 if (hpsa_simple_mode)
Robert Elliott105a3db2015-04-23 09:33:48 -05008609 return 0;
Stephen M. Cameron02ec19c2011-01-06 14:48:29 -06008610
scameron@beardog.cce.hp.com67c99a72014-04-14 14:01:09 -05008611 trans_support = readl(&(h->cfgtable->TransportSupport));
8612 if (!(trans_support & PERFORMANT_MODE))
Robert Elliott105a3db2015-04-23 09:33:48 -05008613 return 0;
scameron@beardog.cce.hp.com67c99a72014-04-14 14:01:09 -05008614
Matt Gatese1f7de02014-02-18 13:55:17 -06008615 /* Check for I/O accelerator mode support */
8616 if (trans_support & CFGTBL_Trans_io_accel1) {
8617 transMethod |= CFGTBL_Trans_io_accel1 |
8618 CFGTBL_Trans_enable_directed_msix;
Robert Elliott105a3db2015-04-23 09:33:48 -05008619 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
8620 if (rc)
8621 return rc;
8622 } else if (trans_support & CFGTBL_Trans_io_accel2) {
8623 transMethod |= CFGTBL_Trans_io_accel2 |
Stephen M. Cameronaca90122014-02-18 13:56:14 -06008624 CFGTBL_Trans_enable_directed_msix;
Robert Elliott105a3db2015-04-23 09:33:48 -05008625 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
8626 if (rc)
8627 return rc;
Matt Gatese1f7de02014-02-18 13:55:17 -06008628 }
8629
	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

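	/*
	 * Each reply ring is DMA-coherent memory the controller fills
	 * with one 8-byte completion tag per finished command.
	 */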
	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

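/*
 * Walk the command pool, counting allocated commands that were sent
 * down an ioaccel path, and sleep in 100 ms steps until none remain.
 */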
static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
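			/* cmd_free() drops the reference taken above. */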
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

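/*
 * Never called at runtime; the BUILD_BUG_ON()s below fail the build
 * if any structure member is not at the byte offset the controller
 * interface expects.
 */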
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3) */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4) */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);