/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
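
/*
 * Illustrative note (not from the original source): board_id concatenates
 * the PCI subsystem device ID (upper 16 bits) with the subsystem vendor ID
 * (lower 16 bits).  For example, the Smart Array P212 is probed with
 * subsystem vendor 0x103C and subsystem device 0x3241 (first entry of
 * hpsa_pci_device_id[]), giving board_id = (0x3241 << 16) | 0x103C
 * = 0x3241103C, the first entry of products[] above.
 */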
198
Webb Scalesa58e7e52015-04-23 09:34:16 -0500199#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
200static const struct scsi_cmnd hpsa_cmd_busy;
201#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
202static const struct scsi_cmnd hpsa_cmd_idle;
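
/*
 * Added note (commentary, not in the original source): hpsa_cmd_busy and
 * hpsa_cmd_idle exist so their unique addresses can serve as sentinel
 * values in c->scsi_cmd, letting the driver distinguish idle and
 * driver-owned "busy" commands from commands carrying a real
 * struct scsi_cmnd (see hpsa_is_cmd_idle() below).
 */
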
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
	struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
	u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
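
/*
 * Added note (commentary, not in the original source): the "-1" sentinels
 * above are stored through u8 pointers, so callers actually observe 0xff
 * when the sense data could not be normalized.  Tests for "invalid" must
 * therefore compare against 0xff (i.e. (u8)-1), not against the int -1,
 * which a u8 can never equal after integer promotion.
 */
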
307
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800308static int check_for_unit_attention(struct ctlr_info *h,
309 struct CommandList *c)
310{
Stephen Cameron9437ac42015-04-23 09:32:16 -0500311 u8 sense_key, asc, ascq;
312 int sense_len;
313
314 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
315 sense_len = sizeof(c->err_info->SenseInfo);
316 else
317 sense_len = c->err_info->SenseLen;
318
319 decode_sense_data(c->err_info->SenseInfo, sense_len,
320 &sense_key, &asc, &ascq);
321 if (sense_key != UNIT_ATTENTION || asc == -1)
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800322 return 0;
323
Stephen Cameron9437ac42015-04-23 09:32:16 -0500324 switch (asc) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800325 case STATE_CHANGED:
Stephen Cameron9437ac42015-04-23 09:32:16 -0500326 dev_warn(&h->pdev->dev,
327 HPSA "%d: a state change detected, command retried\n",
328 h->ctlr);
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800329 break;
330 case LUN_FAILED:
Stephen M. Cameron7f736952014-11-14 17:26:48 -0600331 dev_warn(&h->pdev->dev,
332 HPSA "%d: LUN failure detected\n", h->ctlr);
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800333 break;
334 case REPORT_LUNS_CHANGED:
Stephen M. Cameron7f736952014-11-14 17:26:48 -0600335 dev_warn(&h->pdev->dev,
336 HPSA "%d: report LUN data changed\n", h->ctlr);
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800337 /*
Scott Teel4f4eb9f2012-01-19 14:01:25 -0600338 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
339 * target (array) devices.
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800340 */
341 break;
342 case POWER_OR_RESET:
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -0600343 dev_warn(&h->pdev->dev, HPSA "%d: a power on "
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800344 "or device reset detected\n", h->ctlr);
345 break;
346 case UNIT_ATTENTION_CLEARED:
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -0600347 dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800348 "cleared by another initiator\n", h->ctlr);
349 break;
350 default:
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -0600351 dev_warn(&h->pdev->dev, HPSA "%d: unknown "
Stephen M. Cameronedd16362009-12-08 14:09:11 -0800352 "unit attention detected\n", h->ctlr);
353 break;
354 }
355 return 1;
356}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
		atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
		ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
		ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
		ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
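
/*
 * Added note (commentary, not in the original source): in the 8-byte CISS
 * LUN address, the top two bits of byte 3 select the addressing mode, and
 * the 0x40 pattern tested above (binary 01 in those bits) is understood to
 * select logical-volume addressing, as opposed to peripheral/physical
 * addressing modes.
 */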

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
		"%02X%02X%02X%02X%02X%02X%02X%02X"
		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
		sn[0], sn[1], sn[2], sn[3],
		sn[4], sn[5], sn[6], sn[7],
		sn[8], sn[9], sn[10], sn[11],
		sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
	host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
	host_show_hp_ssd_smart_path_status,
	host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
	host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_lockup_detected,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
	HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.name = HPSA,
	.proc_name = HPSA,
	.queuecommand = hpsa_scsi_queue_command,
	.scan_start = hpsa_scan_start,
	.scan_finished = hpsa_scan_finished,
	.change_queue_depth = hpsa_change_queue_depth,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_abort_handler = hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl = hpsa_ioctl,
	.slave_alloc = hpsa_slave_alloc,
	.slave_configure = hpsa_slave_configure,
	.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
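
/*
 * Added note (commentary, not in the original source): the reply queue read
 * by next_command() is a ring in host memory.  An entry is treated as new
 * when its low bit matches rq->wraparound; the host flips its expected
 * parity each time it wraps past h->max_commands, with the controller
 * presumably alternating the parity of posted tags on each pass.  This
 * lets the host detect fresh entries without zeroing out consumed slots.
 */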

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
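
/*
 * Illustrative example (not from the original source): in normal performant
 * mode, if h->blockFetchTable[c->Header.SGList] is 3, then
 * c->busaddr |= 1 | (3 << 1) sets the low bits to 0x7:
 * bit 0 = 1 (performant mode), bits 1-3 = 3 (block fetch table entry),
 * and bits 4-6 = 0 (normal command type).
 */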

/*
 * set_performant_mode: Modify the tag for cciss performant mode:
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
	int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
	struct CommandList *c,
	int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
		IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
	struct CommandList *c,
	int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
	struct CommandList *c,
	int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
	struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
	struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(c->abort_pending))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
	dev_printk(level, &h->pdev->dev,
		"scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
		description,
		scsi_device_type(dev->devtype),
		dev->vendor,
		dev->model,
		dev->raid_level > RAID_UNKNOWN ?
			"RAID-?" : raid_label[dev->raid_level],
		dev->offload_config ? '+' : '-',
		dev->offload_enabled ? '+' : '-',
		dev->expose_state);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *device,
	struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}
1113
Scott Teelbd9244f2012-01-19 14:01:30 -06001114/* Update an entry in h->dev[] array. */
1115static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
1116 int entry, struct hpsa_scsi_dev_t *new_entry)
1117{
Robert Elliotta473d862015-04-23 09:32:54 -05001118 int offload_enabled;
Scott Teelbd9244f2012-01-19 14:01:30 -06001119 /* assumes h->devlock is held */
1120 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1121
1122 /* Raid level changed. */
1123 h->dev[entry]->raid_level = new_entry->raid_level;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001124
Don Brace03383732015-01-23 16:43:30 -06001125 /* Raid offload parameters changed. Careful about the ordering. */
1126 if (new_entry->offload_config && new_entry->offload_enabled) {
1127 /*
1128 * if drive is newly offload_enabled, we want to copy the
1129 * raid map data first. If previously offload_enabled and
1130 * offload_config were set, raid map data had better be
1131 * the same as it was before. if raid map data is changed
1132 * then it had better be the case that
1133 * h->dev[entry]->offload_enabled is currently 0.
1134 */
1135 h->dev[entry]->raid_map = new_entry->raid_map;
1136 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
Don Brace03383732015-01-23 16:43:30 -06001137 }
Joe Handzika3144e02015-04-23 09:32:59 -05001138 if (new_entry->hba_ioaccel_enabled) {
1139 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1140 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1141 }
1142 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001143 h->dev[entry]->offload_config = new_entry->offload_config;
Stephen M. Cameron9fb0de22014-02-18 13:56:50 -06001144 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
Don Brace03383732015-01-23 16:43:30 -06001145 h->dev[entry]->queue_depth = new_entry->queue_depth;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001146
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001147 /*
1148 * We can turn off ioaccel offload now, but need to delay turning
1149 * it on until we can update h->dev[entry]->phys_disk[], but we
1150 * can't do that until all the devices are updated.
1151 */
1152 h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1153 if (!new_entry->offload_enabled)
1154 h->dev[entry]->offload_enabled = 0;
1155
Robert Elliotta473d862015-04-23 09:32:54 -05001156 offload_enabled = h->dev[entry]->offload_enabled;
1157 h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
Webb Scales0d96ef52015-04-23 09:31:55 -05001158 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
Robert Elliotta473d862015-04-23 09:32:54 -05001159 h->dev[entry]->offload_enabled = offload_enabled;
Scott Teelbd9244f2012-01-19 14:01:30 -06001160}
1161
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001162/* Replace an entry from h->dev[] array. */
1163static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
1164 int entry, struct hpsa_scsi_dev_t *new_entry,
1165 struct hpsa_scsi_dev_t *added[], int *nadded,
1166 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1167{
1168 /* assumes h->devlock is held */
Scott Teelcfe5bad2011-10-26 16:21:07 -05001169 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001170 removed[*nremoved] = h->dev[entry];
1171 (*nremoved)++;
Stephen M. Cameron01350d02011-08-09 08:18:01 -05001172
1173 /*
1174 * New physical devices won't have target/lun assigned yet
1175 * so we need to preserve the values in the slot we are replacing.
1176 */
1177 if (new_entry->target == -1) {
1178 new_entry->target = h->dev[entry]->target;
1179 new_entry->lun = h->dev[entry]->lun;
1180 }
1181
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001182 h->dev[entry] = new_entry;
1183 added[*nadded] = new_entry;
1184 (*nadded)++;
Webb Scales0d96ef52015-04-23 09:31:55 -05001185 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
Robert Elliotta473d862015-04-23 09:32:54 -05001186 new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1187 new_entry->offload_enabled = 0;
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001188}
1189
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001190/* Remove an entry from h->dev[] array. */
1191static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
1192 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1193{
1194 /* assumes h->devlock is held */
1195 int i;
1196 struct hpsa_scsi_dev_t *sd;
1197
Scott Teelcfe5bad2011-10-26 16:21:07 -05001198 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001199
1200 sd = h->dev[entry];
1201 removed[*nremoved] = h->dev[entry];
1202 (*nremoved)++;
1203
1204 for (i = entry; i < h->ndevices-1; i++)
1205 h->dev[i] = h->dev[i+1];
1206 h->ndevices--;
Webb Scales0d96ef52015-04-23 09:31:55 -05001207 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001208}
1209
1210#define SCSI3ADDR_EQ(a, b) ( \
1211 (a)[7] == (b)[7] && \
1212 (a)[6] == (b)[6] && \
1213 (a)[5] == (b)[5] && \
1214 (a)[4] == (b)[4] && \
1215 (a)[3] == (b)[3] && \
1216 (a)[2] == (b)[2] && \
1217 (a)[1] == (b)[1] && \
1218 (a)[0] == (b)[0])
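/*
 * Usage note (illustrative): SCSI3ADDR_EQ(sd->scsi3addr, lunaddrbytes)
 * compares all eight bytes of two SCSI-3 addresses; the && chain
 * short-circuits on the first mismatching byte.
 */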
1219
1220static void fixup_botched_add(struct ctlr_info *h,
1221 struct hpsa_scsi_dev_t *added)
1222{
1223 /* called when scsi_add_device fails in order to re-adjust
1224 * h->dev[] to match the mid layer's view.
1225 */
1226 unsigned long flags;
1227 int i, j;
1228
1229 spin_lock_irqsave(&h->lock, flags);
1230 for (i = 0; i < h->ndevices; i++) {
1231 if (h->dev[i] == added) {
1232 for (j = i; j < h->ndevices-1; j++)
1233 h->dev[j] = h->dev[j+1];
1234 h->ndevices--;
1235 break;
1236 }
1237 }
1238 spin_unlock_irqrestore(&h->lock, flags);
1239 kfree(added);
1240}
1241
1242static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1243 struct hpsa_scsi_dev_t *dev2)
1244{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001245 /* we compare everything except lun and target as these
1246 * are not yet assigned. Compare parts likely
1247 * to differ first
1248 */
1249 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1250 sizeof(dev1->scsi3addr)) != 0)
1251 return 0;
1252 if (memcmp(dev1->device_id, dev2->device_id,
1253 sizeof(dev1->device_id)) != 0)
1254 return 0;
1255 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1256 return 0;
1257 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1258 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001259 if (dev1->devtype != dev2->devtype)
1260 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001261 if (dev1->bus != dev2->bus)
1262 return 0;
1263 return 1;
1264}
1265
Scott Teelbd9244f2012-01-19 14:01:30 -06001266static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1267 struct hpsa_scsi_dev_t *dev2)
1268{
1269 /* Device attributes that can change, but don't mean
1270 * that the device is a different device, nor that the OS
1271 * needs to be told anything about the change.
1272 */
1273 if (dev1->raid_level != dev2->raid_level)
1274 return 1;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001275 if (dev1->offload_config != dev2->offload_config)
1276 return 1;
1277 if (dev1->offload_enabled != dev2->offload_enabled)
1278 return 1;
Don Brace03383732015-01-23 16:43:30 -06001279 if (dev1->queue_depth != dev2->queue_depth)
1280 return 1;
Scott Teelbd9244f2012-01-19 14:01:30 -06001281 return 0;
1282}
1283
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001284/* Find needle in haystack. If exact match found, return DEVICE_SAME,
1285 * and return needle location in *index. If scsi3addr matches, but not
1286 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
Scott Teelbd9244f2012-01-19 14:01:30 -06001287 * location in *index.
1288 * In the case of a minor device attribute change, such as RAID level, just
1289 * return DEVICE_UPDATED, along with the updated device's location in index.
1290 * If needle not found, return DEVICE_NOT_FOUND.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001291 */
1292static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1293 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1294 int *index)
1295{
1296 int i;
1297#define DEVICE_NOT_FOUND 0
1298#define DEVICE_CHANGED 1
1299#define DEVICE_SAME 2
Scott Teelbd9244f2012-01-19 14:01:30 -06001300#define DEVICE_UPDATED 3
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001301 for (i = 0; i < haystack_size; i++) {
Stephen M. Cameron23231042010-02-04 08:43:36 -06001302 if (haystack[i] == NULL) /* previously removed. */
1303 continue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001304 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1305 *index = i;
Scott Teelbd9244f2012-01-19 14:01:30 -06001306 if (device_is_the_same(needle, haystack[i])) {
1307 if (device_updated(needle, haystack[i]))
1308 return DEVICE_UPDATED;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001309 return DEVICE_SAME;
Scott Teelbd9244f2012-01-19 14:01:30 -06001310 } else {
Stephen M. Cameron98465902014-02-21 16:25:00 -06001311 /* Keep offline devices offline */
1312 if (needle->volume_offline)
1313 return DEVICE_NOT_FOUND;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001314 return DEVICE_CHANGED;
Scott Teelbd9244f2012-01-19 14:01:30 -06001315 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001316 }
1317 }
1318 *index = -1;
1319 return DEVICE_NOT_FOUND;
1320}
1321
Stephen M. Cameron98465902014-02-21 16:25:00 -06001322static void hpsa_monitor_offline_device(struct ctlr_info *h,
1323 unsigned char scsi3addr[])
1324{
1325 struct offline_device_entry *device;
1326 unsigned long flags;
1327
1328 /* Check to see if device is already on the list */
1329 spin_lock_irqsave(&h->offline_device_lock, flags);
1330 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1331 if (memcmp(device->scsi3addr, scsi3addr,
1332 sizeof(device->scsi3addr)) == 0) {
1333 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1334 return;
1335 }
1336 }
1337 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1338
1339 /* Device is not on the list, add it. */
1340 device = kmalloc(sizeof(*device), GFP_KERNEL);
1341 if (!device) {
1342 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1343 return;
1344 }
1345 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1346 spin_lock_irqsave(&h->offline_device_lock, flags);
1347 list_add_tail(&device->offline_list, &h->offline_device_list);
1348 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1349}
1350
1351/* Print a message explaining various offline volume states */
1352static void hpsa_show_volume_status(struct ctlr_info *h,
1353 struct hpsa_scsi_dev_t *sd)
1354{
1355 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1356 dev_info(&h->pdev->dev,
1357 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1358 h->scsi_host->host_no,
1359 sd->bus, sd->target, sd->lun);
1360 switch (sd->volume_offline) {
1361 case HPSA_LV_OK:
1362 break;
1363 case HPSA_LV_UNDERGOING_ERASE:
1364 dev_info(&h->pdev->dev,
1365 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1366 h->scsi_host->host_no,
1367 sd->bus, sd->target, sd->lun);
1368 break;
1369 case HPSA_LV_UNDERGOING_RPI:
1370 dev_info(&h->pdev->dev,
1371 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1372 h->scsi_host->host_no,
1373 sd->bus, sd->target, sd->lun);
1374 break;
1375 case HPSA_LV_PENDING_RPI:
1376 dev_info(&h->pdev->dev,
1377 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1378 h->scsi_host->host_no,
1379 sd->bus, sd->target, sd->lun);
1380 break;
1381 case HPSA_LV_ENCRYPTED_NO_KEY:
1382 dev_info(&h->pdev->dev,
1383 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1384 h->scsi_host->host_no,
1385 sd->bus, sd->target, sd->lun);
1386 break;
1387 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1388 dev_info(&h->pdev->dev,
1389 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1390 h->scsi_host->host_no,
1391 sd->bus, sd->target, sd->lun);
1392 break;
1393 case HPSA_LV_UNDERGOING_ENCRYPTION:
1394 dev_info(&h->pdev->dev,
1395 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1396 h->scsi_host->host_no,
1397 sd->bus, sd->target, sd->lun);
1398 break;
1399 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1400 dev_info(&h->pdev->dev,
1401 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1402 h->scsi_host->host_no,
1403 sd->bus, sd->target, sd->lun);
1404 break;
1405 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1406 dev_info(&h->pdev->dev,
1407 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1408 h->scsi_host->host_no,
1409 sd->bus, sd->target, sd->lun);
1410 break;
1411 case HPSA_LV_PENDING_ENCRYPTION:
1412 dev_info(&h->pdev->dev,
1413 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1414 h->scsi_host->host_no,
1415 sd->bus, sd->target, sd->lun);
1416 break;
1417 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1418 dev_info(&h->pdev->dev,
1419 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1420 h->scsi_host->host_no,
1421 sd->bus, sd->target, sd->lun);
1422 break;
1423 }
1424}
1425
Don Brace03383732015-01-23 16:43:30 -06001426/*
1427 * Figure out the list of physical drive pointers for a logical
1428 * drive with RAID offload configured.
1429 */
1430static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1431 struct hpsa_scsi_dev_t *dev[], int ndevices,
1432 struct hpsa_scsi_dev_t *logical_drive)
1433{
1434 struct raid_map_data *map = &logical_drive->raid_map;
1435 struct raid_map_disk_data *dd = &map->data[0];
1436 int i, j;
1437 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1438 le16_to_cpu(map->metadata_disks_per_row);
1439 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1440 le16_to_cpu(map->layout_map_count) *
1441 total_disks_per_row;
1442 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1443 total_disks_per_row;
1444 int qdepth;
1445
1446 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1447 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1448
1449 qdepth = 0;
1450 for (i = 0; i < nraid_map_entries; i++) {
1451 logical_drive->phys_disk[i] = NULL;
1452 if (!logical_drive->offload_config)
1453 continue;
1454 for (j = 0; j < ndevices; j++) {
1455 if (dev[j]->devtype != TYPE_DISK)
1456 continue;
1457 if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1458 continue;
1459 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1460 continue;
1461
1462 logical_drive->phys_disk[i] = dev[j];
1463 if (i < nphys_disk)
1464 qdepth = min(h->nr_cmds, qdepth +
1465 logical_drive->phys_disk[i]->queue_depth);
1466 break;
1467 }
1468
1469 /*
1470 * This can happen if a physical drive is removed and
1471 * the logical drive is degraded. In that case, the RAID
1472 * map data will refer to a physical disk which isn't actually
1473 * present. In that case offload_enabled should already
1474 * be 0, but we'll turn it off here just in case.
1475 */
1476 if (!logical_drive->phys_disk[i]) {
1477 logical_drive->offload_enabled = 0;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001478 logical_drive->offload_to_be_enabled = 0;
1479 logical_drive->queue_depth = 8;
Don Brace03383732015-01-23 16:43:30 -06001480 }
1481 }
1482 if (nraid_map_entries)
1483 /*
1484 * This is correct for reads, too high for full stripe writes,
1485 * way too high for partial stripe writes
1486 */
1487 logical_drive->queue_depth = qdepth;
1488 else
1489 logical_drive->queue_depth = h->nr_cmds;
1490}
1491
1492static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1493 struct hpsa_scsi_dev_t *dev[], int ndevices)
1494{
1495 int i;
1496
1497 for (i = 0; i < ndevices; i++) {
1498 if (dev[i]->devtype != TYPE_DISK)
1499 continue;
1500 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1501 continue;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001502
1503 /*
1504 * If offload is currently enabled, the RAID map and
1505 * phys_disk[] assignment *better* not be changing
1506 * and since it isn't changing, we do not need to
1507 * update it.
1508 */
1509 if (dev[i]->offload_enabled)
1510 continue;
1511
Don Brace03383732015-01-23 16:43:30 -06001512 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1513 }
1514}
1515
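/*
 * Reconcile h->dev[] with the freshly discovered list sd[]: remove
 * devices that disappeared, replace ones that changed, update minor
 * attributes in place, and add anything new. Note that ioaccel offload
 * is only (re)enabled at the end, once phys_disk[] pointers are
 * coherent.
 */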
Stephen M. Cameron4967bd32010-02-04 08:41:49 -06001516static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001517 struct hpsa_scsi_dev_t *sd[], int nsds)
1518{
1519 /* sd contains scsi3 addresses and devtypes, and inquiry
1520 * data. This function takes what's in sd to be the current
1521 * reality and updates h->dev[] to reflect that reality.
1522 */
1523 int i, entry, device_change, changes = 0;
1524 struct hpsa_scsi_dev_t *csd;
1525 unsigned long flags;
1526 struct hpsa_scsi_dev_t **added, **removed;
1527 int nadded, nremoved;
1528 struct Scsi_Host *sh = NULL;
1529
Scott Teelcfe5bad2011-10-26 16:21:07 -05001530 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1531 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001532
1533 if (!added || !removed) {
1534 dev_warn(&h->pdev->dev,
1535 "out of memory in adjust_hpsa_scsi_table\n");
1536 goto free_and_out;
1537 }
1538
1539 spin_lock_irqsave(&h->devlock, flags);
1540
1541 /* find any devices in h->dev[] that are not in
1542 * sd[] and remove them from h->dev[], and for any
1543 * devices which have changed, remove the old device
1544 * info and add the new device info.
Scott Teelbd9244f2012-01-19 14:01:30 -06001545 * If minor device attributes change, just update
1546 * the existing device structure.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001547 */
1548 i = 0;
1549 nremoved = 0;
1550 nadded = 0;
1551 while (i < h->ndevices) {
1552 csd = h->dev[i];
1553 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1554 if (device_change == DEVICE_NOT_FOUND) {
1555 changes++;
1556 hpsa_scsi_remove_entry(h, hostno, i,
1557 removed, &nremoved);
1558 continue; /* remove ^^^, hence i not incremented */
1559 } else if (device_change == DEVICE_CHANGED) {
1560 changes++;
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001561 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1562 added, &nadded, removed, &nremoved);
Stephen M. Cameronc7f172d2010-02-04 08:43:31 -06001563 /* Set it to NULL to prevent it from being freed
1564 * at the bottom of hpsa_update_scsi_devices()
1565 */
1566 sd[entry] = NULL;
Scott Teelbd9244f2012-01-19 14:01:30 -06001567 } else if (device_change == DEVICE_UPDATED) {
1568 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001569 }
1570 i++;
1571 }
1572
1573 /* Now, make sure every device listed in sd[] is also
1574 * listed in h->dev[], adding them if they aren't found
1575 */
1576
1577 for (i = 0; i < nsds; i++) {
1578 if (!sd[i]) /* if already added above. */
1579 continue;
Stephen M. Cameron98465902014-02-21 16:25:00 -06001580
1581 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1582 * as the SCSI mid-layer does not handle such devices well.
1583 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1584 * at 160Hz, and prevents the system from coming up.
1585 */
1586 if (sd[i]->volume_offline) {
1587 hpsa_show_volume_status(h, sd[i]);
Webb Scales0d96ef52015-04-23 09:31:55 -05001588 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
Stephen M. Cameron98465902014-02-21 16:25:00 -06001589 continue;
1590 }
1591
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001592 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1593 h->ndevices, &entry);
1594 if (device_change == DEVICE_NOT_FOUND) {
1595 changes++;
1596 if (hpsa_scsi_add_entry(h, hostno, sd[i],
1597 added, &nadded) != 0)
1598 break;
1599 sd[i] = NULL; /* prevent from being freed later. */
1600 } else if (device_change == DEVICE_CHANGED) {
1601 /* should never happen... */
1602 changes++;
1603 dev_warn(&h->pdev->dev,
1604 "device unexpectedly changed.\n");
1605 /* but if it does happen, we just ignore that device */
1606 }
1607 }
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001608 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1609
1610 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1611 * any logical drives that need it enabled.
1612 */
1613 for (i = 0; i < h->ndevices; i++)
1614 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1615
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001616 spin_unlock_irqrestore(&h->devlock, flags);
1617
Stephen M. Cameron98465902014-02-21 16:25:00 -06001618 /* Monitor devices which are in one of several NOT READY states to be
1619 * brought online later. This must be done without holding h->devlock,
1620 * so don't touch h->dev[]
1621 */
1622 for (i = 0; i < nsds; i++) {
1623 if (!sd[i]) /* if already added above. */
1624 continue;
1625 if (sd[i]->volume_offline)
1626 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1627 }
1628
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001629 /* Don't notify the scsi mid layer of any changes the first time
1630 * through (or if there are no changes); scsi_scan_host() will
1631 * handle it later, on the first pass.
1632 */
1633 if (hostno == -1 || !changes)
1634 goto free_and_out;
1635
1636 sh = h->scsi_host;
1637 /* Notify scsi mid layer of any removed devices */
1638 for (i = 0; i < nremoved; i++) {
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001639 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1640 struct scsi_device *sdev =
1641 scsi_device_lookup(sh, removed[i]->bus,
1642 removed[i]->target, removed[i]->lun);
1643 if (sdev != NULL) {
1644 scsi_remove_device(sdev);
1645 scsi_device_put(sdev);
1646 } else {
1647 /*
1648 * We don't expect to get here.
1649 * future cmds to this device will get selection
1650 * timeout as if the device was gone.
1651 */
Webb Scales0d96ef52015-04-23 09:31:55 -05001652 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1653 "didn't find device for removal.");
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001654 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001655 }
1656 kfree(removed[i]);
1657 removed[i] = NULL;
1658 }
1659
1660 /* Notify scsi mid layer of any added devices */
1661 for (i = 0; i < nadded; i++) {
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001662 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1663 continue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001664 if (scsi_add_device(sh, added[i]->bus,
1665 added[i]->target, added[i]->lun) == 0)
1666 continue;
Webb Scales0d96ef52015-04-23 09:31:55 -05001667 hpsa_show_dev_msg(KERN_WARNING, h, added[i],
1668 "addition failed, device not added.");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001669 /* now we have to remove it from h->dev,
1670 * since it didn't get added to scsi mid layer
1671 */
1672 fixup_botched_add(h, added[i]);
Robert Elliott105a3db2015-04-23 09:33:48 -05001673 added[i] = NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001674 }
1675
1676free_and_out:
1677 kfree(added);
1678 kfree(removed);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001679}
1680
1681/*
Joe Perches9e03aa22013-09-03 13:45:58 -07001682 * Look up bus/target/lun and return the matching struct hpsa_scsi_dev_t *.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001683 * Assumes h->devlock is held.
1684 */
1685static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1686 int bus, int target, int lun)
1687{
1688 int i;
1689 struct hpsa_scsi_dev_t *sd;
1690
1691 for (i = 0; i < h->ndevices; i++) {
1692 sd = h->dev[i];
1693 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1694 return sd;
1695 }
1696 return NULL;
1697}
1698
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001699static int hpsa_slave_alloc(struct scsi_device *sdev)
1700{
1701 struct hpsa_scsi_dev_t *sd;
1702 unsigned long flags;
1703 struct ctlr_info *h;
1704
1705 h = sdev_to_hba(sdev);
1706 spin_lock_irqsave(&h->devlock, flags);
1707 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1708 sdev_id(sdev), sdev->lun);
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001709 if (likely(sd)) {
Don Brace03383732015-01-23 16:43:30 -06001710 atomic_set(&sd->ioaccel_cmds_out, 0);
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001711 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1712 } else
1713 sdev->hostdata = NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001714 spin_unlock_irqrestore(&h->devlock, flags);
1715 return 0;
1716}
1717
Stephen Cameron41ce4c32015-04-23 09:31:47 -05001718/* configure scsi device based on internal per-device structure */
1719static int hpsa_slave_configure(struct scsi_device *sdev)
1720{
1721 struct hpsa_scsi_dev_t *sd;
1722 int queue_depth;
1723
1724 sd = sdev->hostdata;
1725 sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1726
1727 if (sd)
1728 queue_depth = sd->queue_depth != 0 ?
1729 sd->queue_depth : sdev->host->can_queue;
1730 else
1731 queue_depth = sdev->host->can_queue;
1732
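	/* Prefer the per-device depth set up during device discovery;
	 * fall back to the host-wide can_queue when it is zero or there
	 * is no hostdata.
	 */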
1733 scsi_change_queue_depth(sdev, queue_depth);
1734
1735 return 0;
1736}
1737
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001738static void hpsa_slave_destroy(struct scsi_device *sdev)
1739{
Stephen M. Cameronbcc44252010-02-04 08:41:54 -06001740 /* nothing to do. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001741}
1742
Webb Scalesd9a729f2015-04-23 09:33:27 -05001743static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1744{
1745 int i;
1746
1747 if (!h->ioaccel2_cmd_sg_list)
1748 return;
1749 for (i = 0; i < h->nr_cmds; i++) {
1750 kfree(h->ioaccel2_cmd_sg_list[i]);
1751 h->ioaccel2_cmd_sg_list[i] = NULL;
1752 }
1753 kfree(h->ioaccel2_cmd_sg_list);
1754 h->ioaccel2_cmd_sg_list = NULL;
1755}
1756
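/*
 * Allocate one ioaccel2 SG chain block per command; each block can hold
 * up to h->maxsgentries ioaccel2_sg_element entries and is DMA-mapped
 * on demand by hpsa_map_ioaccel2_sg_chain_block().
 */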
1757static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1758{
1759 int i;
1760
1761 if (h->chainsize <= 0)
1762 return 0;
1763
1764 h->ioaccel2_cmd_sg_list =
1765 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1766 GFP_KERNEL);
1767 if (!h->ioaccel2_cmd_sg_list)
1768 return -ENOMEM;
1769 for (i = 0; i < h->nr_cmds; i++) {
1770 h->ioaccel2_cmd_sg_list[i] =
1771 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1772 h->maxsgentries, GFP_KERNEL);
1773 if (!h->ioaccel2_cmd_sg_list[i])
1774 goto clean;
1775 }
1776 return 0;
1777
1778clean:
1779 hpsa_free_ioaccel2_sg_chain_blocks(h);
1780 return -ENOMEM;
1781}
1782
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001783static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1784{
1785 int i;
1786
1787 if (!h->cmd_sg_list)
1788 return;
1789 for (i = 0; i < h->nr_cmds; i++) {
1790 kfree(h->cmd_sg_list[i]);
1791 h->cmd_sg_list[i] = NULL;
1792 }
1793 kfree(h->cmd_sg_list);
1794 h->cmd_sg_list = NULL;
1795}
1796
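/*
 * Allocate one SG chain block per command for the RAID path; each block
 * holds up to h->chainsize SGDescriptor entries and is linked from the
 * command's last in-line SG slot by hpsa_map_sg_chain_block().
 */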
Robert Elliott105a3db2015-04-23 09:33:48 -05001797static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001798{
1799 int i;
1800
1801 if (h->chainsize <= 0)
1802 return 0;
1803
1804 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1805 GFP_KERNEL);
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001806 if (!h->cmd_sg_list) {
1807 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001808 return -ENOMEM;
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001809 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001810 for (i = 0; i < h->nr_cmds; i++) {
1811 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1812 h->chainsize, GFP_KERNEL);
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001813 if (!h->cmd_sg_list[i]) {
1814 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001815 goto clean;
Robert Elliott3d4e6af2015-01-23 16:42:42 -06001816 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001817 }
1818 return 0;
1819
1820clean:
1821 hpsa_free_sg_chain_blocks(h);
1822 return -ENOMEM;
1823}
1824
Webb Scalesd9a729f2015-04-23 09:33:27 -05001825static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
1826 struct io_accel2_cmd *cp, struct CommandList *c)
1827{
1828 struct ioaccel2_sg_element *chain_block;
1829 u64 temp64;
1830 u32 chain_size;
1831
1832 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
1833 chain_size = le32_to_cpu(cp->data_len);
1834 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
1835 PCI_DMA_TODEVICE);
1836 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1837 /* prevent subsequent unmapping */
1838 cp->sg->address = 0;
1839 return -1;
1840 }
1841 cp->sg->address = cpu_to_le64(temp64);
1842 return 0;
1843}
1844
1845static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
1846 struct io_accel2_cmd *cp)
1847{
1848 struct ioaccel2_sg_element *chain_sg;
1849 u64 temp64;
1850 u32 chain_size;
1851
1852 chain_sg = cp->sg;
1853 temp64 = le64_to_cpu(chain_sg->address);
1854 chain_size = le32_to_cpu(cp->data_len);
1855 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
1856}
1857
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001858static int hpsa_map_sg_chain_block(struct ctlr_info *h,
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001859 struct CommandList *c)
1860{
1861 struct SGDescriptor *chain_sg, *chain_block;
1862 u64 temp64;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001863 u32 chain_len;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001864
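	/*
	 * Example with hypothetical sizes: if max_cmd_sg_entries = 32 and
	 * SGTotal = 51, the last in-command descriptor becomes the chain
	 * pointer and chain_len = (51 - 32) * sizeof(*chain_sg), i.e. the
	 * chain block carries the remaining 19 descriptors.
	 */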
1865 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1866 chain_block = h->cmd_sg_list[c->cmdindex];
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001867 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1868 chain_len = sizeof(*chain_sg) *
Don Brace2b08b3e2015-01-23 16:41:09 -06001869 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001870 chain_sg->Len = cpu_to_le32(chain_len);
1871 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001872 PCI_DMA_TODEVICE);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001873 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1874 /* prevent subsequent unmapping */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001875 chain_sg->Addr = cpu_to_le64(0);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001876 return -1;
1877 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001878 chain_sg->Addr = cpu_to_le64(temp64);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001879 return 0;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001880}
1881
1882static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1883 struct CommandList *c)
1884{
1885 struct SGDescriptor *chain_sg;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001886
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001887 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001888 return;
1889
1890 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001891 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1892 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001893}
1894
Scott Teela09c1442014-02-18 13:57:21 -06001895
1896/* Decode the various types of errors on ioaccel2 path.
1897 * Return 1 for any error that should generate a RAID path retry.
1898 * Return 0 for errors that don't require a RAID path retry.
1899 */
1900static int handle_ioaccel_mode2_error(struct ctlr_info *h,
Scott Teelc3497752014-02-18 13:56:34 -06001901 struct CommandList *c,
1902 struct scsi_cmnd *cmd,
1903 struct io_accel2_cmd *c2)
1904{
1905 int data_len;
Scott Teela09c1442014-02-18 13:57:21 -06001906 int retry = 0;
Joe Handzikc40820d2015-04-23 09:33:32 -05001907 u32 ioaccel2_resid = 0;
Scott Teelc3497752014-02-18 13:56:34 -06001908
1909 switch (c2->error_data.serv_response) {
1910 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1911 switch (c2->error_data.status) {
1912 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1913 break;
1914 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001915 cmd->result |= SAM_STAT_CHECK_CONDITION;
Scott Teelc3497752014-02-18 13:56:34 -06001916 if (c2->error_data.data_present !=
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001917 IOACCEL2_SENSE_DATA_PRESENT) {
1918 memset(cmd->sense_buffer, 0,
1919 SCSI_SENSE_BUFFERSIZE);
Scott Teelc3497752014-02-18 13:56:34 -06001920 break;
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001921 }
Scott Teelc3497752014-02-18 13:56:34 -06001922 /* copy the sense data */
1923 data_len = c2->error_data.sense_data_len;
1924 if (data_len > SCSI_SENSE_BUFFERSIZE)
1925 data_len = SCSI_SENSE_BUFFERSIZE;
1926 if (data_len > sizeof(c2->error_data.sense_data_buff))
1927 data_len =
1928 sizeof(c2->error_data.sense_data_buff);
1929 memcpy(cmd->sense_buffer,
1930 c2->error_data.sense_data_buff, data_len);
Scott Teela09c1442014-02-18 13:57:21 -06001931 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001932 break;
1933 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
Scott Teela09c1442014-02-18 13:57:21 -06001934 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001935 break;
1936 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
Scott Teela09c1442014-02-18 13:57:21 -06001937 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001938 break;
1939 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
Stephen Cameron4a8da222015-04-23 09:32:43 -05001940 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001941 break;
1942 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
Scott Teela09c1442014-02-18 13:57:21 -06001943 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001944 break;
1945 default:
Scott Teela09c1442014-02-18 13:57:21 -06001946 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001947 break;
1948 }
1949 break;
1950 case IOACCEL2_SERV_RESPONSE_FAILURE:
Joe Handzikc40820d2015-04-23 09:33:32 -05001951 switch (c2->error_data.status) {
1952 case IOACCEL2_STATUS_SR_IO_ERROR:
1953 case IOACCEL2_STATUS_SR_IO_ABORTED:
1954 case IOACCEL2_STATUS_SR_OVERRUN:
1955 retry = 1;
1956 break;
1957 case IOACCEL2_STATUS_SR_UNDERRUN:
1958 cmd->result = (DID_OK << 16); /* host byte */
1959 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1960 ioaccel2_resid = get_unaligned_le32(
1961 &c2->error_data.resid_cnt[0]);
1962 scsi_set_resid(cmd, ioaccel2_resid);
1963 break;
1964 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
1965 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
1966 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
1967 /* We will get an event from ctlr to trigger rescan */
1968 retry = 1;
1969 break;
1970 default:
1971 retry = 1;
Joe Handzikc40820d2015-04-23 09:33:32 -05001972 }
Scott Teelc3497752014-02-18 13:56:34 -06001973 break;
1974 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1975 break;
1976 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1977 break;
1978 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
Scott Teela09c1442014-02-18 13:57:21 -06001979 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001980 break;
1981 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
Scott Teelc3497752014-02-18 13:56:34 -06001982 break;
1983 default:
Scott Teela09c1442014-02-18 13:57:21 -06001984 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001985 break;
1986 }
Scott Teela09c1442014-02-18 13:57:21 -06001987
1988 return retry; /* retry on raid path? */
Scott Teelc3497752014-02-18 13:56:34 -06001989}
1990
Webb Scalesa58e7e52015-04-23 09:34:16 -05001991static void hpsa_cmd_resolve_events(struct ctlr_info *h,
1992 struct CommandList *c)
1993{
1994 /*
1995 * Prevent the following race in the abort handler:
1996 *
1997 * 1. LLD is requested to abort a SCSI command
1998 * 2. The SCSI command completes
1999 * 3. The struct CommandList associated with step 2 is made available
2000 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2001 * 5. Abort handler follows scsi_cmnd->host_scribble and
2002 * finds struct CommandList and tries to abort it
2003 * Now we have aborted the wrong command.
2004 *
2005 * Clear c->scsi_cmd here so that the abort handler will know this
2006 * command has completed. Then, check to see if the abort handler is
2007 * waiting for this command, and, if so, wake it.
2008 */
2009 c->scsi_cmd = SCSI_CMD_IDLE;
2010 mb(); /* Ensure c->scsi_cmd is set to SCSI_CMD_IDLE */
2011 if (c->abort_pending) {
2012 c->abort_pending = false;
2013 wake_up_all(&h->abort_sync_wait_queue);
2014 }
2015}
2016
Webb Scales73153fe2015-04-23 09:35:04 -05002017static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2018 struct CommandList *c)
2019{
2020 hpsa_cmd_resolve_events(h, c);
2021 cmd_tagged_free(h, c);
2022}
2023
Webb Scales8a0ff922015-04-23 09:34:11 -05002024static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2025 struct CommandList *c, struct scsi_cmnd *cmd)
2026{
Webb Scales73153fe2015-04-23 09:35:04 -05002027 hpsa_cmd_resolve_and_free(h, c);
Webb Scales8a0ff922015-04-23 09:34:11 -05002028 cmd->scsi_done(cmd);
2029}
2030
2031static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2032{
2033 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2034 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2035}
2036
Webb Scalesa58e7e52015-04-23 09:34:16 -05002037static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2038{
2039 cmd->result = DID_ABORT << 16;
2040}
2041
2042static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2043 struct scsi_cmnd *cmd)
2044{
2045 hpsa_set_scsi_cmd_aborted(cmd);
2046 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2047 c->Request.CDB, c->err_info->ScsiStatus);
Webb Scales73153fe2015-04-23 09:35:04 -05002048 hpsa_cmd_resolve_and_free(h, c);
Webb Scalesa58e7e52015-04-23 09:34:16 -05002049}
2050
Scott Teelc3497752014-02-18 13:56:34 -06002051static void process_ioaccel2_completion(struct ctlr_info *h,
2052 struct CommandList *c, struct scsi_cmnd *cmd,
2053 struct hpsa_scsi_dev_t *dev)
2054{
2055 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2056
2057 /* check for good status */
2058 if (likely(c2->error_data.serv_response == 0 &&
Webb Scales8a0ff922015-04-23 09:34:11 -05002059 c2->error_data.status == 0))
2060 return hpsa_cmd_free_and_done(h, c, cmd);
Scott Teelc3497752014-02-18 13:56:34 -06002061
Webb Scalesa58e7e52015-04-23 09:34:16 -05002062 /* don't requeue a command which is being aborted */
2063 if (unlikely(c->abort_pending))
2064 return hpsa_cmd_abort_and_free(h, c, cmd);
2065
Webb Scales8a0ff922015-04-23 09:34:11 -05002066 /*
2067 * Any RAID offload error results in retry which will use
Scott Teelc3497752014-02-18 13:56:34 -06002068 * the normal I/O path so the controller can handle whatever's
2069 * wrong.
2070 */
2071 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
2072 c2->error_data.serv_response ==
2073 IOACCEL2_SERV_RESPONSE_FAILURE) {
Don Brace080ef1c2015-01-23 16:43:25 -06002074 if (c2->error_data.status ==
2075 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2076 dev->offload_enabled = 0;
Webb Scales8a0ff922015-04-23 09:34:11 -05002077
2078 return hpsa_retry_cmd(h, c);
Scott Teelc3497752014-02-18 13:56:34 -06002079 }
Don Brace080ef1c2015-01-23 16:43:25 -06002080
2081 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
Webb Scales8a0ff922015-04-23 09:34:11 -05002082 return hpsa_retry_cmd(h, c);
Don Brace080ef1c2015-01-23 16:43:25 -06002083
Webb Scales8a0ff922015-04-23 09:34:11 -05002084 return hpsa_cmd_free_and_done(h, c, cmd);
Scott Teelc3497752014-02-18 13:56:34 -06002085}
2086
Stephen Cameron9437ac42015-04-23 09:32:16 -05002087/* Returns 0 on success, < 0 otherwise. */
2088static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2089 struct CommandList *cp)
2090{
2091 u8 tmf_status = cp->err_info->ScsiStatus;
2092
2093 switch (tmf_status) {
2094 case CISS_TMF_COMPLETE:
2095 /*
2096 * CISS_TMF_COMPLETE never actually happens; instead,
2097 * ei->CommandStatus == 0 covers this case.
2098 */
2099 case CISS_TMF_SUCCESS:
2100 return 0;
2101 case CISS_TMF_INVALID_FRAME:
2102 case CISS_TMF_NOT_SUPPORTED:
2103 case CISS_TMF_FAILED:
2104 case CISS_TMF_WRONG_LUN:
2105 case CISS_TMF_OVERLAPPED_TAG:
2106 break;
2107 default:
2108 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2109 tmf_status);
2110 break;
2111 }
2112 return -tmf_status;
2113}
2114
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05002115static void complete_scsi_command(struct CommandList *cp)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002116{
2117 struct scsi_cmnd *cmd;
2118 struct ctlr_info *h;
2119 struct ErrorInfo *ei;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002120 struct hpsa_scsi_dev_t *dev;
Webb Scalesd9a729f2015-04-23 09:33:27 -05002121 struct io_accel2_cmd *c2;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002122
Stephen Cameron9437ac42015-04-23 09:32:16 -05002123 u8 sense_key;
2124 u8 asc; /* additional sense code */
2125 u8 ascq; /* additional sense code qualifier */
Stephen M. Camerondb111e12011-06-03 09:57:34 -05002126 unsigned long sense_data_size;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002127
2128 ei = cp->err_info;
Stephen Cameron7fa30302015-01-23 16:44:30 -06002129 cmd = cp->scsi_cmd;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002130 h = cp->h;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002131 dev = cmd->device->hostdata;
Webb Scalesd9a729f2015-04-23 09:33:27 -05002132 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002133
2134 scsi_dma_unmap(cmd); /* undo the DMA mappings */
Matt Gatese1f7de02014-02-18 13:55:17 -06002135 if ((cp->cmd_type == CMD_SCSI) &&
Don Brace2b08b3e2015-01-23 16:41:09 -06002136 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002137 hpsa_unmap_sg_chain_block(h, cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002138
Webb Scalesd9a729f2015-04-23 09:33:27 -05002139 if ((cp->cmd_type == CMD_IOACCEL2) &&
2140 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2141 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2142
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002143 cmd->result = (DID_OK << 16); /* host byte */
2144 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
Scott Teelc3497752014-02-18 13:56:34 -06002145
Don Brace03383732015-01-23 16:43:30 -06002146 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2147 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2148
Webb Scales25163bd2015-04-23 09:32:00 -05002149 /*
2150 * We check for lockup status here as it may be set for
2151 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2152 * fail_all_outstanding_cmds()
2153 */
2154 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2155 /* DID_NO_CONNECT will prevent a retry */
2156 cmd->result = DID_NO_CONNECT << 16;
Webb Scales8a0ff922015-04-23 09:34:11 -05002157 return hpsa_cmd_free_and_done(h, cp, cmd);
Webb Scales25163bd2015-04-23 09:32:00 -05002158 }
2159
Scott Teelc3497752014-02-18 13:56:34 -06002160 if (cp->cmd_type == CMD_IOACCEL2)
2161 return process_ioaccel2_completion(h, cp, cmd, dev);
2162
Robert Elliott6aa4c362014-07-03 10:18:19 -05002163 scsi_set_resid(cmd, ei->ResidualCnt);
Webb Scales8a0ff922015-04-23 09:34:11 -05002164 if (ei->CommandStatus == 0)
2165 return hpsa_cmd_free_and_done(h, cp, cmd);
Robert Elliott6aa4c362014-07-03 10:18:19 -05002166
Matt Gatese1f7de02014-02-18 13:55:17 -06002167 /* For I/O accelerator commands, copy over some fields to the normal
2168 * CISS header used below for error handling.
2169 */
2170 if (cp->cmd_type == CMD_IOACCEL1) {
2171 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
Don Brace2b08b3e2015-01-23 16:41:09 -06002172 cp->Header.SGList = scsi_sg_count(cmd);
2173 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2174 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2175 IOACCEL1_IOFLAGS_CDBLEN_MASK;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002176 cp->Header.tag = c->tag;
Matt Gatese1f7de02014-02-18 13:55:17 -06002177 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2178 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002179
2180 /* Any RAID offload error results in retry which will use
2181 * the normal I/O path so the controller can handle whatever's
2182 * wrong.
2183 */
2184 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
2185 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2186 dev->offload_enabled = 0;
Webb Scalesa58e7e52015-04-23 09:34:16 -05002187 if (!cp->abort_pending)
2188 return hpsa_retry_cmd(h, cp);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002189 }
Matt Gatese1f7de02014-02-18 13:55:17 -06002190 }
2191
Webb Scalesa58e7e52015-04-23 09:34:16 -05002192 if (cp->abort_pending)
2193 ei->CommandStatus = CMD_ABORTED;
2194
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002195 /* an error has occurred */
2196 switch (ei->CommandStatus) {
2197
2198 case CMD_TARGET_STATUS:
Stephen Cameron9437ac42015-04-23 09:32:16 -05002199 cmd->result |= ei->ScsiStatus;
2200 /* copy the sense data */
2201 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2202 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2203 else
2204 sense_data_size = sizeof(ei->SenseInfo);
2205 if (ei->SenseLen < sense_data_size)
2206 sense_data_size = ei->SenseLen;
2207 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2208 if (ei->ScsiStatus)
2209 decode_sense_data(ei->SenseInfo, sense_data_size,
2210 &sense_key, &asc, &ascq);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002211 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
Matt Gates1d3b3602010-02-04 08:43:00 -06002212 if (sense_key == ABORTED_COMMAND) {
Stephen M. Cameron2e311fb2013-09-23 13:33:41 -05002213 cmd->result |= DID_SOFT_ERROR << 16;
Matt Gates1d3b3602010-02-04 08:43:00 -06002214 break;
2215 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002216 break;
2217 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002218 /* Problem was not a check condition
2219 * Pass it up to the upper layers...
2220 */
2221 if (ei->ScsiStatus) {
2222 dev_warn(&h->pdev->dev,
2223 "cp %p has status 0x%x Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, Returning result: 0x%x\n",
2225 cp, ei->ScsiStatus,
2226 sense_key, asc, ascq,
2227 cmd->result);
2228 } else { /* scsi status is zero??? How??? */
2229 dev_warn(&h->pdev->dev,
2230 "cp %p SCSI status was 0. Returning no connection.\n", cp);
2231
2232 /* Ordinarily, this case should never happen,
2233 * but there is a bug in some released firmware
2234 * revisions that allows it to happen if, for
2235 * example, a 4100 backplane loses power and
2236 * the tape drive is in it. We assume that
2237 * it's a fatal error of some kind because we
2238 * can't show that it wasn't. We will make it
2239 * look like selection timeout since that is
2240 * the most common reason for this to occur,
2241 * and it's severe enough.
2242 */
2243
2244 cmd->result = DID_NO_CONNECT << 16;
2245 }
2246 break;
2247
2248 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2249 break;
2250 case CMD_DATA_OVERRUN:
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002251 dev_warn(&h->pdev->dev,
2252 "CDB %16phN data overrun\n", cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002253 break;
2254 case CMD_INVALID: {
2255 /* print_bytes(cp, sizeof(*cp), 1, 0);
2256 print_cmd(cp); */
2257 /* We get CMD_INVALID if you address a non-existent device
2258 * instead of a selection timeout (no response). You will
2259 * see this if you yank out a drive, then try to access it.
2260 * This is kind of a shame because it means that any other
2261 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2262 * missing target. */
2263 cmd->result = DID_NO_CONNECT << 16;
2264 }
2265 break;
2266 case CMD_PROTOCOL_ERR:
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05002267 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002268 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2269 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002270 break;
2271 case CMD_HARDWARE_ERR:
2272 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002273 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2274 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002275 break;
2276 case CMD_CONNECTION_LOST:
2277 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002278 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2279 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002280 break;
2281 case CMD_ABORTED:
Webb Scalesa58e7e52015-04-23 09:34:16 -05002282 /* Return now to avoid calling scsi_done(). */
2283 return hpsa_cmd_abort_and_free(h, cp, cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002284 case CMD_ABORT_FAILED:
2285 cmd->result = DID_ERROR << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002286 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2287 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002288 break;
2289 case CMD_UNSOLICITED_ABORT:
Stephen M. Cameronf6e76052011-07-26 11:08:52 -05002290 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002291 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2292 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002293 break;
2294 case CMD_TIMEOUT:
2295 cmd->result = DID_TIME_OUT << 16;
Stephen Cameronf42e81e2015-01-23 16:44:35 -06002296 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2297 cp->Request.CDB);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002298 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002299 case CMD_UNABORTABLE:
2300 cmd->result = DID_ERROR << 16;
2301 dev_warn(&h->pdev->dev, "Command unabortable\n");
2302 break;
Stephen Cameron9437ac42015-04-23 09:32:16 -05002303 case CMD_TMF_STATUS:
2304 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2305 cmd->result = DID_ERROR << 16;
2306 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002307 case CMD_IOACCEL_DISABLED:
2308 /* This only handles the direct pass-through case since RAID
2309 * offload is handled above. Just attempt a retry.
2310 */
2311 cmd->result = DID_SOFT_ERROR << 16;
2312 dev_warn(&h->pdev->dev,
2313 "cp %p had HP SSD Smart Path error\n", cp);
2314 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002315 default:
2316 cmd->result = DID_ERROR << 16;
2317 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2318 cp, ei->CommandStatus);
2319 }
Webb Scales8a0ff922015-04-23 09:34:11 -05002320
2321 return hpsa_cmd_free_and_done(h, cp, cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002322}
2323
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002324static void hpsa_pci_unmap(struct pci_dev *pdev,
2325 struct CommandList *c, int sg_used, int data_direction)
2326{
2327 int i;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002328
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002329 for (i = 0; i < sg_used; i++)
2330 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2331 le32_to_cpu(c->SG[i].Len),
2332 data_direction);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002333}
2334
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002335static int hpsa_map_one(struct pci_dev *pdev,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002336 struct CommandList *cp,
2337 unsigned char *buf,
2338 size_t buflen,
2339 int data_direction)
2340{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002341 u64 addr64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002342
2343 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2344 cp->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002345 cp->Header.SGTotal = cpu_to_le16(0);
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002346 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002347 }
2348
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002349 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
Shuah Khaneceaae12013-02-20 11:24:34 -06002350 if (dma_mapping_error(&pdev->dev, addr64)) {
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002351 /* Prevent subsequent unmap of something never mapped */
Shuah Khaneceaae12013-02-20 11:24:34 -06002352 cp->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002353 cp->Header.SGTotal = cpu_to_le16(0);
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002354 return -1;
Shuah Khaneceaae12013-02-20 11:24:34 -06002355 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002356 cp->SG[0].Addr = cpu_to_le64(addr64);
2357 cp->SG[0].Len = cpu_to_le32(buflen);
2358 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2359 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2360 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002361 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002362}
2363
Webb Scales25163bd2015-04-23 09:32:00 -05002364#define NO_TIMEOUT ((unsigned long) -1)
2365#define DEFAULT_TIMEOUT 30000 /* milliseconds */
2366static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2367 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002368{
2369 DECLARE_COMPLETION_ONSTACK(wait);
2370
2371 c->waiting = &wait;
Webb Scales25163bd2015-04-23 09:32:00 -05002372 __enqueue_cmd_and_start_io(h, c, reply_queue);
2373 if (timeout_msecs == NO_TIMEOUT) {
2374 /* TODO: get rid of this no-timeout thing */
2375 wait_for_completion_io(&wait);
2376 return IO_OK;
2377 }
2378 if (!wait_for_completion_io_timeout(&wait,
2379 msecs_to_jiffies(timeout_msecs))) {
2380 dev_warn(&h->pdev->dev, "Command timed out.\n");
2381 return -ETIMEDOUT;
2382 }
2383 return IO_OK;
2384}
2385
2386static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2387 int reply_queue, unsigned long timeout_msecs)
2388{
2389 if (unlikely(lockup_detected(h))) {
2390 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2391 return IO_OK;
2392 }
2393 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002394}
2395
Stephen M. Cameron094963d2014-05-29 10:53:18 -05002396static u32 lockup_detected(struct ctlr_info *h)
2397{
2398 int cpu;
2399 u32 rc, *lockup_detected;
2400
2401 cpu = get_cpu();
2402 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2403 rc = *lockup_detected;
2404 put_cpu();
2405 return rc;
2406}
2407
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002408#define MAX_DRIVER_CMD_RETRIES 25
Webb Scales25163bd2015-04-23 09:32:00 -05002409static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2410 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002411{
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002412 int backoff_time = 10, retry_count = 0;
Webb Scales25163bd2015-04-23 09:32:00 -05002413 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002414
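	/*
	 * Retry on unit attention or busy. From the fifth attempt
	 * onward, sleep between tries, doubling the delay (10ms, 20ms,
	 * 40ms, ...) up to a ceiling of 1.28s.
	 */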
2415 do {
Joe Perches7630abd2011-05-08 23:32:40 -07002416 memset(c->err_info, 0, sizeof(*c->err_info));
Webb Scales25163bd2015-04-23 09:32:00 -05002417 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2418 timeout_msecs);
2419 if (rc)
2420 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002421 retry_count++;
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002422 if (retry_count > 3) {
2423 msleep(backoff_time);
2424 if (backoff_time < 1000)
2425 backoff_time *= 2;
2426 }
Matt Bondurant852af202012-05-01 11:42:35 -05002427 } while ((check_for_unit_attention(h, c) ||
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002428 check_for_busy(h, c)) &&
2429 retry_count <= MAX_DRIVER_CMD_RETRIES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002430 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
Webb Scales25163bd2015-04-23 09:32:00 -05002431 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2432 rc = -EIO;
2433 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002434}
2435
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002436static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2437 struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002438{
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002439 const u8 *cdb = c->Request.CDB;
2440 const u8 *lun = c->Header.LUN.LunAddrBytes;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002441
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002442 dev_warn(&h->pdev->dev,
2443 "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2444 txt, lun[0], lun[1], lun[2], lun[3],
2445 lun[4], lun[5], lun[6], lun[7],
2446 cdb[0], cdb[1], cdb[2], cdb[3],
2447 cdb[4], cdb[5], cdb[6], cdb[7],
2448 cdb[8], cdb[9], cdb[10], cdb[11],
2449 cdb[12], cdb[13], cdb[14], cdb[15]);
2450}
2451
2452static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2453 struct CommandList *cp)
2454{
2455 const struct ErrorInfo *ei = cp->err_info;
2456 struct device *d = &cp->h->pdev->dev;
Stephen Cameron9437ac42015-04-23 09:32:16 -05002457 u8 sense_key, asc, ascq;
2458 int sense_len;
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002459
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002460 switch (ei->CommandStatus) {
2461 case CMD_TARGET_STATUS:
Stephen Cameron9437ac42015-04-23 09:32:16 -05002462 if (ei->SenseLen > sizeof(ei->SenseInfo))
2463 sense_len = sizeof(ei->SenseInfo);
2464 else
2465 sense_len = ei->SenseLen;
2466 decode_sense_data(ei->SenseInfo, sense_len,
2467 &sense_key, &asc, &ascq);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002468 hpsa_print_cmd(h, "SCSI status", cp);
2469 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
Stephen Cameron9437ac42015-04-23 09:32:16 -05002470 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2471 sense_key, asc, ascq);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002472 else
Stephen Cameron9437ac42015-04-23 09:32:16 -05002473 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002474 if (ei->ScsiStatus == 0)
2475 dev_warn(d,
2476 "SCSI status is abnormally zero. (probably indicates selection timeout reported incorrectly due to a known firmware bug, circa July, 2001.)\n");
2479 break;
2480 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002481 break;
2482 case CMD_DATA_OVERRUN:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002483 hpsa_print_cmd(h, "overrun condition", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002484 break;
2485 case CMD_INVALID: {
2486 /* controller unfortunately reports SCSI passthru's
2487 * to non-existent targets as invalid commands.
2488 */
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002489 hpsa_print_cmd(h, "invalid command", cp);
2490 dev_warn(d, "probably means device no longer present\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002491 }
2492 break;
2493 case CMD_PROTOCOL_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002494 hpsa_print_cmd(h, "protocol error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002495 break;
2496 case CMD_HARDWARE_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002497 hpsa_print_cmd(h, "hardware error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002498 break;
2499 case CMD_CONNECTION_LOST:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002500 hpsa_print_cmd(h, "connection lost", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002501 break;
2502 case CMD_ABORTED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002503 hpsa_print_cmd(h, "aborted", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002504 break;
2505 case CMD_ABORT_FAILED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002506 hpsa_print_cmd(h, "abort failed", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002507 break;
2508 case CMD_UNSOLICITED_ABORT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002509 hpsa_print_cmd(h, "unsolicited abort", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002510 break;
2511 case CMD_TIMEOUT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002512 hpsa_print_cmd(h, "timed out", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002513 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002514 case CMD_UNABORTABLE:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002515 hpsa_print_cmd(h, "unabortable", cp);
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002516 break;
Webb Scales25163bd2015-04-23 09:32:00 -05002517 case CMD_CTLR_LOCKUP:
2518 hpsa_print_cmd(h, "controller lockup detected", cp);
2519 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002520 default:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002521 hpsa_print_cmd(h, "unknown status", cp);
2522 dev_warn(d, "Unknown command status %x\n",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002523 ei->CommandStatus);
2524 }
2525}
2526
2527static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002528 u16 page, unsigned char *buf,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002529 unsigned char bufsize)
2530{
2531 int rc = IO_OK;
2532 struct CommandList *c;
2533 struct ErrorInfo *ei;
2534
Stephen Cameron45fcb862015-01-23 16:43:04 -06002535 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002536
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002537 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2538 page, scsi3addr, TYPE_CMD)) {
2539 rc = -1;
2540 goto out;
2541 }
Webb Scales25163bd2015-04-23 09:32:00 -05002542 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2543 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2544 if (rc)
2545 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002546 ei = c->err_info;
2547 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002548 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002549 rc = -1;
2550 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002551out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06002552 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002553 return rc;
2554}
2555
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002556static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2557 unsigned char *scsi3addr, unsigned char page,
2558 struct bmic_controller_parameters *buf, size_t bufsize)
2559{
2560 int rc = IO_OK;
2561 struct CommandList *c;
2562 struct ErrorInfo *ei;
2563
Stephen Cameron45fcb862015-01-23 16:43:04 -06002564 c = cmd_alloc(h);
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002565 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2566 page, scsi3addr, TYPE_CMD)) {
2567 rc = -1;
2568 goto out;
2569 }
Webb Scales25163bd2015-04-23 09:32:00 -05002570 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2571 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2572 if (rc)
2573 goto out;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002574 ei = c->err_info;
2575 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2576 hpsa_scsi_interpret_error(h, c);
2577 rc = -1;
2578 }
2579out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06002580 cmd_free(h, c);
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002581 return rc;
Robert Elliottbf43caf2015-04-23 09:33:38 -05002582}
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002583
Scott Teelbf711ac2014-02-18 13:56:39 -06002584static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
Webb Scales25163bd2015-04-23 09:32:00 -05002585 u8 reset_type, int reply_queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002586{
2587 int rc = IO_OK;
2588 struct CommandList *c;
2589 struct ErrorInfo *ei;
2590
Stephen Cameron45fcb862015-01-23 16:43:04 -06002591 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002592
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002594 /* fill_cmd can't fail here, no data buffer to map. */
Scott Teelbf711ac2014-02-18 13:56:39 -06002595 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2596 scsi3addr, TYPE_MSG);
2597 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
Webb Scales25163bd2015-04-23 09:32:00 -05002598 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2599 if (rc) {
2600 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2601 goto out;
2602 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002603 /* no unmap needed here because no data xfer. */
2604
2605 ei = c->err_info;
2606 if (ei->CommandStatus != 0) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002607 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002608 rc = -1;
2609 }
Webb Scales25163bd2015-04-23 09:32:00 -05002610out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06002611 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002612 return rc;
2613}
2614
2615static void hpsa_get_raid_level(struct ctlr_info *h,
2616 unsigned char *scsi3addr, unsigned char *raid_level)
2617{
2618 int rc;
2619 unsigned char *buf;
2620
2621 *raid_level = RAID_UNKNOWN;
2622 buf = kzalloc(64, GFP_KERNEL);
2623 if (!buf)
2624 return;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002625 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002626 if (rc == 0)
2627 *raid_level = buf[8];
2628 if (*raid_level > RAID_UNKNOWN)
2629 *raid_level = RAID_UNKNOWN;
2630 kfree(buf);
2631 return;
2632}
2633
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002634#define HPSA_MAP_DEBUG
2635#ifdef HPSA_MAP_DEBUG
2636static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2637 struct raid_map_data *map_buff)
2638{
2639 struct raid_map_disk_data *dd = &map_buff->data[0];
2640 int map, row, col;
2641 u16 map_cnt, row_cnt, disks_per_row;
2642
2643 if (rc != 0)
2644 return;
2645
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06002646 /* Show details only if debugging has been activated. */
2647 if (h->raid_offload_debug < 2)
2648 return;
2649
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002650 dev_info(&h->pdev->dev, "structure_size = %u\n",
2651 le32_to_cpu(map_buff->structure_size));
2652 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2653 le32_to_cpu(map_buff->volume_blk_size));
2654 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2655 le64_to_cpu(map_buff->volume_blk_cnt));
2656 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2657 map_buff->phys_blk_shift);
2658 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2659 map_buff->parity_rotation_shift);
2660 dev_info(&h->pdev->dev, "strip_size = %u\n",
2661 le16_to_cpu(map_buff->strip_size));
2662 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2663 le64_to_cpu(map_buff->disk_starting_blk));
2664 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2665 le64_to_cpu(map_buff->disk_blk_cnt));
2666 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2667 le16_to_cpu(map_buff->data_disks_per_row));
2668 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2669 le16_to_cpu(map_buff->metadata_disks_per_row));
2670 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2671 le16_to_cpu(map_buff->row_cnt));
2672 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2673 le16_to_cpu(map_buff->layout_map_count));
Don Brace2b08b3e2015-01-23 16:41:09 -06002674 dev_info(&h->pdev->dev, "flags = 0x%x\n",
Scott Teeldd0e19f2014-02-18 13:57:31 -06002675 le16_to_cpu(map_buff->flags));
Don Brace2b08b3e2015-01-23 16:41:09 -06002676	dev_info(&h->pdev->dev, "encryption = %s\n",
2677 le16_to_cpu(map_buff->flags) &
2678 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
Scott Teeldd0e19f2014-02-18 13:57:31 -06002679 dev_info(&h->pdev->dev, "dekindex = %u\n",
2680 le16_to_cpu(map_buff->dekindex));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002681 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2682 for (map = 0; map < map_cnt; map++) {
2683 dev_info(&h->pdev->dev, "Map%u:\n", map);
2684 row_cnt = le16_to_cpu(map_buff->row_cnt);
2685 for (row = 0; row < row_cnt; row++) {
2686 dev_info(&h->pdev->dev, " Row%u:\n", row);
2687 disks_per_row =
2688 le16_to_cpu(map_buff->data_disks_per_row);
2689 for (col = 0; col < disks_per_row; col++, dd++)
2690 dev_info(&h->pdev->dev,
2691 " D%02u: h=0x%04x xor=%u,%u\n",
2692 col, dd->ioaccel_handle,
2693 dd->xor_mult[0], dd->xor_mult[1]);
2694 disks_per_row =
2695 le16_to_cpu(map_buff->metadata_disks_per_row);
2696 for (col = 0; col < disks_per_row; col++, dd++)
2697 dev_info(&h->pdev->dev,
2698 " M%02u: h=0x%04x xor=%u,%u\n",
2699 col, dd->ioaccel_handle,
2700 dd->xor_mult[0], dd->xor_mult[1]);
2701 }
2702 }
2703}
2704#else
2705static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2706 __attribute__((unused)) int rc,
2707 __attribute__((unused)) struct raid_map_data *map_buff)
2708{
2709}
2710#endif
2711
2712static int hpsa_get_raid_map(struct ctlr_info *h,
2713 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2714{
2715 int rc = 0;
2716 struct CommandList *c;
2717 struct ErrorInfo *ei;
2718
Stephen Cameron45fcb862015-01-23 16:43:04 -06002719 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05002720
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002721 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2722 sizeof(this_device->raid_map), 0,
2723 scsi3addr, TYPE_CMD)) {
Robert Elliott2dd02d72015-04-23 09:33:43 -05002724 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
2725 cmd_free(h, c);
2726 return -1;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002727 }
Webb Scales25163bd2015-04-23 09:32:00 -05002728 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2729 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2730 if (rc)
2731 goto out;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002732 ei = c->err_info;
2733 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002734 hpsa_scsi_interpret_error(h, c);
Webb Scales25163bd2015-04-23 09:32:00 -05002735 rc = -1;
2736 goto out;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002737 }
Stephen Cameron45fcb862015-01-23 16:43:04 -06002738 cmd_free(h, c);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002739
2740 /* @todo in the future, dynamically allocate RAID map memory */
2741 if (le32_to_cpu(this_device->raid_map.structure_size) >
2742 sizeof(this_device->raid_map)) {
2743 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2744 rc = -1;
2745 }
2746 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2747 return rc;
Webb Scales25163bd2015-04-23 09:32:00 -05002748out:
2749 cmd_free(h, c);
2750 return rc;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002751}
2752
Don Brace03383732015-01-23 16:43:30 -06002753static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2754 unsigned char scsi3addr[], u16 bmic_device_index,
2755 struct bmic_identify_physical_device *buf, size_t bufsize)
2756{
2757 int rc = IO_OK;
2758 struct CommandList *c;
2759 struct ErrorInfo *ei;
2760
2761 c = cmd_alloc(h);
2762 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2763 0, RAID_CTLR_LUNID, TYPE_CMD);
2764 if (rc)
2765 goto out;
2766
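	/* The 16-bit BMIC device index is split across the CDB:
	 * low byte in CDB[2], high byte in CDB[9].
	 */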
2767 c->Request.CDB[2] = bmic_device_index & 0xff;
2768 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2769
Webb Scales25163bd2015-04-23 09:32:00 -05002770	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
2771						NO_TIMEOUT);
	if (rc)
		goto out;
Don Brace03383732015-01-23 16:43:30 -06002772 ei = c->err_info;
2773 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2774 hpsa_scsi_interpret_error(h, c);
2775 rc = -1;
2776 }
2777out:
2778 cmd_free(h, c);
2779 return rc;
2780}
2781
Stephen M. Cameron1b70150a2014-02-18 13:57:16 -06002782static int hpsa_vpd_page_supported(struct ctlr_info *h,
2783 unsigned char scsi3addr[], u8 page)
2784{
2785 int rc;
2786 int i;
2787 int pages;
2788 unsigned char *buf, bufsize;
2789
2790 buf = kzalloc(256, GFP_KERNEL);
2791 if (!buf)
2792 return 0;
2793
2794 /* Get the size of the page list first */
2795 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2796 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2797 buf, HPSA_VPD_HEADER_SZ);
2798 if (rc != 0)
2799 goto exit_unsupported;
2800 pages = buf[3];
2801 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2802 bufsize = pages + HPSA_VPD_HEADER_SZ;
2803 else
2804 bufsize = 255;
2805
2806 /* Get the whole VPD page list */
2807 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2808 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2809 buf, bufsize);
2810 if (rc != 0)
2811 goto exit_unsupported;
2812
2813 pages = buf[3];
2814 for (i = 1; i <= pages; i++)
2815 if (buf[3 + i] == page)
2816 goto exit_supported;
2817exit_unsupported:
2818 kfree(buf);
2819 return 0;
2820exit_supported:
2821 kfree(buf);
2822 return 1;
2823}
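
/*
 * Worked example (illustrative) for the scan above: a device reporting
 * three VPD pages would return a SUPPORTED_PAGES buffer with buf[3] == 3
 * and the page codes in buf[4..6], e.g. 0x00, 0x83, 0xC1, so a query for
 * page 0xC1 matches on the third loop iteration and returns 1.
 */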
2824
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002825static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2826 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2827{
2828 int rc;
2829 unsigned char *buf;
2830 u8 ioaccel_status;
2831
2832 this_device->offload_config = 0;
2833 this_device->offload_enabled = 0;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05002834 this_device->offload_to_be_enabled = 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002835
2836 buf = kzalloc(64, GFP_KERNEL);
2837 if (!buf)
2838 return;
Stephen M. Cameron1b70150a2014-02-18 13:57:16 -06002839 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2840 goto out;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002841 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002842 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002843 if (rc != 0)
2844 goto out;
2845
2846#define IOACCEL_STATUS_BYTE 4
2847#define OFFLOAD_CONFIGURED_BIT 0x01
2848#define OFFLOAD_ENABLED_BIT 0x02
2849 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2850 this_device->offload_config =
2851 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2852 if (this_device->offload_config) {
2853 this_device->offload_enabled =
2854 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2855 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2856 this_device->offload_enabled = 0;
2857 }
Stephen Cameron41ce4c32015-04-23 09:31:47 -05002858 this_device->offload_to_be_enabled = this_device->offload_enabled;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002859out:
2860 kfree(buf);
2861 return;
2862}
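
/*
 * Illustrative note on the decoding above: ioaccel_status == 0x03 has both
 * OFFLOAD_CONFIGURED_BIT and OFFLOAD_ENABLED_BIT set, so the volume is
 * offload-configured and tentatively offload-enabled (subject to the RAID
 * map fetch succeeding); ioaccel_status == 0x01 means configured but not
 * enabled.
 */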
2863
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002864/* Get the device id from inquiry page 0x83 */
2865static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2866 unsigned char *device_id, int buflen)
2867{
2868 int rc;
2869 unsigned char *buf;
2870
2871 if (buflen > 16)
2872 buflen = 16;
2873 buf = kzalloc(64, GFP_KERNEL);
2874 if (!buf)
Stephen M. Camerona84d7942014-05-29 10:54:20 -05002875 return -ENOMEM;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002876 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002877 if (rc == 0)
2878 memcpy(device_id, &buf[8], buflen);
2879 kfree(buf);
2880 return rc != 0;
2881}
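
/*
 * Illustrative note: in the VPD 0x83 response parsed above, bytes 0-3 are
 * the page header and bytes 4-7 the first designation descriptor's header,
 * so the identifier itself starts at buf[8]; at most 16 bytes of it are
 * kept. (Descriptor layout per SPC, stated here as an aid; it is not
 * spelled out by this driver.)
 */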
2882
2883static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
Don Brace03383732015-01-23 16:43:30 -06002884 void *buf, int bufsize,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002885 int extended_response)
2886{
2887 int rc = IO_OK;
2888 struct CommandList *c;
2889 unsigned char scsi3addr[8];
2890 struct ErrorInfo *ei;
2891
Stephen Cameron45fcb862015-01-23 16:43:04 -06002892 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05002893
Stephen M. Camerone89c0ae2010-02-04 08:42:04 -06002894 /* address the controller */
2895 memset(scsi3addr, 0, sizeof(scsi3addr));
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002896 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2897 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2898 rc = -1;
2899 goto out;
2900 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002901 if (extended_response)
2902 c->Request.CDB[1] = extended_response;
Webb Scales25163bd2015-04-23 09:32:00 -05002903 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2904 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2905 if (rc)
2906 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002907 ei = c->err_info;
2908 if (ei->CommandStatus != 0 &&
2909 ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002910 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002911 rc = -1;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002912 } else {
Don Brace03383732015-01-23 16:43:30 -06002913 struct ReportLUNdata *rld = buf;
2914
2915 if (rld->extended_response_flag != extended_response) {
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002916 dev_err(&h->pdev->dev,
2917 "report luns requested format %u, got %u\n",
2918 extended_response,
Don Brace03383732015-01-23 16:43:30 -06002919 rld->extended_response_flag);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002920 rc = -1;
2921 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002922 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002923out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06002924 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002925 return rc;
2926}
2927
2928static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
Don Brace03383732015-01-23 16:43:30 -06002929 struct ReportExtendedLUNdata *buf, int bufsize)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002930{
Don Brace03383732015-01-23 16:43:30 -06002931 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
2932 HPSA_REPORT_PHYS_EXTENDED);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002933}
2934
2935static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2936 struct ReportLUNdata *buf, int bufsize)
2937{
2938 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2939}
2940
2941static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2942 int bus, int target, int lun)
2943{
2944 device->bus = bus;
2945 device->target = target;
2946 device->lun = lun;
2947}
2948
Stephen M. Cameron98465902014-02-21 16:25:00 -06002949/* Use VPD inquiry to get details of volume status */
2950static int hpsa_get_volume_status(struct ctlr_info *h,
2951 unsigned char scsi3addr[])
2952{
2953 int rc;
2954 int status;
2955 int size;
2956 unsigned char *buf;
2957
2958 buf = kzalloc(64, GFP_KERNEL);
2959 if (!buf)
2960 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2961
2962 /* Does controller have VPD for logical volume status? */
Stephen M. Cameron24a4b072014-05-29 10:54:10 -05002963 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
Stephen M. Cameron98465902014-02-21 16:25:00 -06002964 goto exit_failed;
Stephen M. Cameron98465902014-02-21 16:25:00 -06002965
2966 /* Get the size of the VPD return buffer */
2967 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2968 buf, HPSA_VPD_HEADER_SZ);
Stephen M. Cameron24a4b072014-05-29 10:54:10 -05002969 if (rc != 0)
Stephen M. Cameron98465902014-02-21 16:25:00 -06002970 goto exit_failed;
Stephen M. Cameron98465902014-02-21 16:25:00 -06002971 size = buf[3];
2972
2973 /* Now get the whole VPD buffer */
2974 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2975 buf, size + HPSA_VPD_HEADER_SZ);
Stephen M. Cameron24a4b072014-05-29 10:54:10 -05002976 if (rc != 0)
Stephen M. Cameron98465902014-02-21 16:25:00 -06002977 goto exit_failed;
Stephen M. Cameron98465902014-02-21 16:25:00 -06002978 status = buf[4]; /* status byte */
2979
2980 kfree(buf);
2981 return status;
2982exit_failed:
2983 kfree(buf);
2984 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2985}
2986
2987/* Determine offline status of a volume.
2988 * Return either:
2989 * 0 (not offline)
Stephen M. Cameron67955ba2014-05-29 10:54:25 -05002990 * 0xff (offline for unknown reasons)
Stephen M. Cameron98465902014-02-21 16:25:00 -06002991 * # (integer code indicating one of several NOT READY states
2992 * describing why a volume is to be kept offline)
2993 */
Stephen M. Cameron67955ba2014-05-29 10:54:25 -05002994static int hpsa_volume_offline(struct ctlr_info *h,
Stephen M. Cameron98465902014-02-21 16:25:00 -06002995 unsigned char scsi3addr[])
2996{
2997 struct CommandList *c;
Stephen Cameron9437ac42015-04-23 09:32:16 -05002998 unsigned char *sense;
2999 u8 sense_key, asc, ascq;
3000 int sense_len;
Webb Scales25163bd2015-04-23 09:32:00 -05003001 int rc, ldstat = 0;
Stephen M. Cameron98465902014-02-21 16:25:00 -06003002 u16 cmd_status;
3003 u8 scsi_status;
3004#define ASC_LUN_NOT_READY 0x04
3005#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3006#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3007
3008 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05003009
Stephen M. Cameron98465902014-02-21 16:25:00 -06003010 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
Webb Scales25163bd2015-04-23 09:32:00 -05003011 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3012 if (rc) {
3013 cmd_free(h, c);
3014 return 0;
3015 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06003016 sense = c->err_info->SenseInfo;
Stephen Cameron9437ac42015-04-23 09:32:16 -05003017 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3018 sense_len = sizeof(c->err_info->SenseInfo);
3019 else
3020 sense_len = c->err_info->SenseLen;
3021 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
Stephen M. Cameron98465902014-02-21 16:25:00 -06003022 cmd_status = c->err_info->CommandStatus;
3023 scsi_status = c->err_info->ScsiStatus;
3024 cmd_free(h, c);
3025 /* Is the volume 'not ready'? */
3026 if (cmd_status != CMD_TARGET_STATUS ||
3027 scsi_status != SAM_STAT_CHECK_CONDITION ||
3028 sense_key != NOT_READY ||
3029 asc != ASC_LUN_NOT_READY) {
3030 return 0;
3031 }
3032
3033 /* Determine the reason for not ready state */
3034 ldstat = hpsa_get_volume_status(h, scsi3addr);
3035
3036 /* Keep volume offline in certain cases: */
3037 switch (ldstat) {
3038 case HPSA_LV_UNDERGOING_ERASE:
3039 case HPSA_LV_UNDERGOING_RPI:
3040 case HPSA_LV_PENDING_RPI:
3041 case HPSA_LV_ENCRYPTED_NO_KEY:
3042 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3043 case HPSA_LV_UNDERGOING_ENCRYPTION:
3044 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3045 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3046 return ldstat;
3047 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3048 /* If VPD status page isn't available,
3049 * use ASC/ASCQ to determine state
3050 */
3051 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3052 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3053 return ldstat;
3054 break;
3055 default:
3056 break;
3057 }
3058 return 0;
3059}
3060
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05003061/*
3062 * Find out if a logical device supports aborts by simply trying one.
3063 * Smart Array may claim not to support aborts on logical drives, but
3064 * if an MSA2000 is connected, the drives on that box will be presented
3065 * by the Smart Array as logical drives, and aborts may be sent to
3066 * those devices successfully. So the simplest way to find out is
3067 * to try an abort and see how the device responds.
3068 */
3069static int hpsa_device_supports_aborts(struct ctlr_info *h,
3070 unsigned char *scsi3addr)
3071{
3072 struct CommandList *c;
3073 struct ErrorInfo *ei;
3074 int rc = 0;
3075
3076 u64 tag = (u64) -1; /* bogus tag */
3077
3078 /* Assume that physical devices support aborts */
3079 if (!is_logical_dev_addr_mode(scsi3addr))
3080 return 1;
3081
3082 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05003083
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05003084 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3085 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3086 /* no unmap needed here because no data xfer. */
3087 ei = c->err_info;
3088 switch (ei->CommandStatus) {
3089 case CMD_INVALID:
3090 rc = 0;
3091 break;
3092 case CMD_UNABORTABLE:
3093 case CMD_ABORT_FAILED:
3094 rc = 1;
3095 break;
Stephen Cameron9437ac42015-04-23 09:32:16 -05003096 case CMD_TMF_STATUS:
3097 rc = hpsa_evaluate_tmf_status(h, c);
3098 break;
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05003099 default:
3100 rc = 0;
3101 break;
3102 }
3103 cmd_free(h, c);
3104 return rc;
3105}
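
/*
 * Illustrative note on the probe above: the abort uses a deliberately bogus
 * tag, so it can never match a real command. CMD_UNABORTABLE or
 * CMD_ABORT_FAILED therefore means the device understood the abort request
 * (rc = 1), while CMD_INVALID means aborts are not supported at all
 * (rc = 0).
 */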
3106
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003107static int hpsa_update_device_info(struct ctlr_info *h,
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003108 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3109 unsigned char *is_OBDR_device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003110{
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003111
3112#define OBDR_SIG_OFFSET 43
3113#define OBDR_TAPE_SIG "$DR-10"
3114#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3115#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3116
Stephen M. Cameronea6d3bc2010-02-04 08:42:09 -06003117 unsigned char *inq_buff;
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003118 unsigned char *obdr_sig;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003119
Stephen M. Cameronea6d3bc2010-02-04 08:42:09 -06003120 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003121 if (!inq_buff)
3122 goto bail_out;
3123
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003124 /* Do an inquiry to the device to see what it is. */
3125 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3126 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3127 /* Inquiry failed (msg printed already) */
3128 dev_err(&h->pdev->dev,
3129 "hpsa_update_device_info: inquiry failed\n");
3130 goto bail_out;
3131 }
3132
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003133 this_device->devtype = (inq_buff[0] & 0x1f);
3134 memcpy(this_device->scsi3addr, scsi3addr, 8);
3135 memcpy(this_device->vendor, &inq_buff[8],
3136 sizeof(this_device->vendor));
3137 memcpy(this_device->model, &inq_buff[16],
3138 sizeof(this_device->model));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003139 memset(this_device->device_id, 0,
3140 sizeof(this_device->device_id));
3141 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
3142 sizeof(this_device->device_id));
3143
3144 if (this_device->devtype == TYPE_DISK &&
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003145 is_logical_dev_addr_mode(scsi3addr)) {
Stephen M. Cameron67955ba2014-05-29 10:54:25 -05003146 int volume_offline;
3147
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003148 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003149 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3150 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
Stephen M. Cameron67955ba2014-05-29 10:54:25 -05003151 volume_offline = hpsa_volume_offline(h, scsi3addr);
3152 if (volume_offline < 0 || volume_offline > 0xff)
3153 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3154 this_device->volume_offline = volume_offline & 0xff;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003155 } else {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003156 this_device->raid_level = RAID_UNKNOWN;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003157 this_device->offload_config = 0;
3158 this_device->offload_enabled = 0;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05003159 this_device->offload_to_be_enabled = 0;
Joe Handzika3144e02015-04-23 09:32:59 -05003160 this_device->hba_ioaccel_enabled = 0;
Stephen M. Cameron98465902014-02-21 16:25:00 -06003161 this_device->volume_offline = 0;
Don Brace03383732015-01-23 16:43:30 -06003162 this_device->queue_depth = h->nr_cmds;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003163 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003164
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003165 if (is_OBDR_device) {
3166 /* See if this is a One-Button-Disaster-Recovery device
3167 * by looking for "$DR-10" at offset 43 in inquiry data.
3168 */
3169 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3170 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3171 strncmp(obdr_sig, OBDR_TAPE_SIG,
3172 OBDR_SIG_LEN) == 0);
3173 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003174 kfree(inq_buff);
3175 return 0;
3176
3177bail_out:
3178 kfree(inq_buff);
3179 return 1;
3180}
3181
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05003182static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3183 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3184{
3185 unsigned long flags;
3186 int rc, entry;
3187 /*
3188 * See if this device supports aborts. If we already know
3189 * the device, we already know if it supports aborts, otherwise
3190 * we have to find out if it supports aborts by trying one.
3191 */
3192 spin_lock_irqsave(&h->devlock, flags);
3193 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3194 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3195 entry >= 0 && entry < h->ndevices) {
3196 dev->supports_aborts = h->dev[entry]->supports_aborts;
3197 spin_unlock_irqrestore(&h->devlock, flags);
3198 } else {
3199 spin_unlock_irqrestore(&h->devlock, flags);
3200 dev->supports_aborts =
3201 hpsa_device_supports_aborts(h, scsi3addr);
3202 if (dev->supports_aborts < 0)
3203 dev->supports_aborts = 0;
3204 }
3205}
3206
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003207static unsigned char *ext_target_model[] = {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003208 "MSA2012",
3209 "MSA2024",
3210 "MSA2312",
3211 "MSA2324",
Stephen M. Cameronfda38512011-05-03 15:00:07 -05003212 "P2000 G3 SAS",
Stephen M. Camerone06c8e52013-09-23 13:33:56 -05003213 "MSA 2040 SAS",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003214 NULL,
3215};
3216
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003217static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003218{
3219 int i;
3220
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003221 for (i = 0; ext_target_model[i]; i++)
3222 if (strncmp(device->model, ext_target_model[i],
3223 strlen(ext_target_model[i])) == 0)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003224 return 1;
3225 return 0;
3226}
3227
3228/* Helper function to assign bus, target, lun mapping of devices.
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003229 * Puts non-external target logical volumes on bus 0, external target logical
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003230 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
3231 * Logical drive target and lun are assigned at this time, but
3232 * physical device lun and target assignment are deferred (assigned
3233 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3234 */
3235static void figure_bus_target_lun(struct ctlr_info *h,
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003236 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003237{
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003238 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003239
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003240 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3241 /* physical device, target and lun filled in later */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003242 if (is_hba_lunid(lunaddrbytes))
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003243 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003244 else
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003245 /* defer target, lun assignment for physical devices */
3246 hpsa_set_bus_target_lun(device, 2, -1, -1);
3247 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003248 }
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003249 /* It's a logical device */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003250 if (is_ext_target(h, device)) {
3251		/* External target: put logicals on bus 1 and match the
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003252		 * target/lun numbers the box reports; any other smart
3253		 * array: bus 0, target 0, match lunid
3254 */
3255 hpsa_set_bus_target_lun(device,
3256 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
3257 return;
3258 }
3259 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003260}
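
/*
 * Minimal sketch (hypothetical helper, not used by the driver): decode an
 * external-target lunid into the bus 1 target/lun assignment performed by
 * figure_bus_target_lun() above. For example, lunid 0x00020005 decodes to
 * target 2, lun 5, while a local logical drive with lunid 0x00000007 would
 * instead land at bus 0, target 0, lun 7.
 */
static inline void hpsa_example_ext_target_decode(u32 lunid,
	int *target, int *lun)
{
	*target = (lunid >> 16) & 0x3fff;	/* 14-bit target from bits 16-29 */
	*lun = lunid & 0x00ff;			/* low byte is the lun */
}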
3261
3262/*
3263 * If there is no lun 0 on a target, linux won't find any devices.
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003264 * For the external targets (arrays), we have to manually detect the enclosure
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003265 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
3266 * it for some reason. *tmpdevice is the target we're adding,
3267 * this_device is a pointer into the current element of currentsd[]
3268 * that we're building up in update_scsi_devices(), below.
3269 * lunzerobits is a bitmap that tracks which targets already have a
3270 * lun 0 assigned.
3271 * Returns 1 if an enclosure was added, 0 if not.
3272 */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003273static int add_ext_target_dev(struct ctlr_info *h,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003274 struct hpsa_scsi_dev_t *tmpdevice,
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003275 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003276 unsigned long lunzerobits[], int *n_ext_target_devs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003277{
3278 unsigned char scsi3addr[8];
3279
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003280 if (test_bit(tmpdevice->target, lunzerobits))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003281 return 0; /* There is already a lun 0 on this target. */
3282
3283 if (!is_logical_dev_addr_mode(lunaddrbytes))
3284 return 0; /* It's the logical targets that may lack lun 0. */
3285
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003286 if (!is_ext_target(h, tmpdevice))
3287 return 0; /* Only external target devices have this problem. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003288
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003289 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003290 return 0;
3291
Stephen M. Cameronc4f8a292011-01-07 10:55:43 -06003292 memset(scsi3addr, 0, 8);
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003293 scsi3addr[3] = tmpdevice->target;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003294 if (is_hba_lunid(scsi3addr))
3295 return 0; /* Don't add the RAID controller here. */
3296
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003297 if (is_scsi_rev_5(h))
3298 return 0; /* p1210m doesn't need to do this. */
3299
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003300 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
Scott Teelaca4a522012-01-19 14:01:19 -06003301 dev_warn(&h->pdev->dev, "Maximum number of external "
3302 "target devices exceeded. Check your hardware "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003303 "configuration.");
3304 return 0;
3305 }
3306
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003307 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003308 return 0;
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003309 (*n_ext_target_devs)++;
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003310 hpsa_set_bus_target_lun(this_device,
3311 tmpdevice->bus, tmpdevice->target, 0);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05003312 hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003313 set_bit(tmpdevice->target, lunzerobits);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003314 return 1;
3315}
3316
3317/*
Scott Teel54b6e9e2014-02-18 13:56:45 -06003318 * Get address of physical disk used for an ioaccel2 mode command:
3319 * 1. Extract ioaccel2 handle from the command.
3320 * 2. Find a matching ioaccel2 handle from list of physical disks.
3321 * 3. Return:
3322 * 1 and set scsi3addr to the address of the matching physical disk,
3323 * 0 if no matching physical disk was found.
3324 */
3325static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3326 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3327{
Stephen Cameron41ce4c32015-04-23 09:31:47 -05003328 struct io_accel2_cmd *c2 =
3329 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3330 unsigned long flags;
Scott Teel54b6e9e2014-02-18 13:56:45 -06003331 int i;
Scott Teel54b6e9e2014-02-18 13:56:45 -06003332
Stephen Cameron41ce4c32015-04-23 09:31:47 -05003333 spin_lock_irqsave(&h->devlock, flags);
3334 for (i = 0; i < h->ndevices; i++)
3335 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3336 memcpy(scsi3addr, h->dev[i]->scsi3addr,
3337 sizeof(h->dev[i]->scsi3addr));
3338 spin_unlock_irqrestore(&h->devlock, flags);
3339 return 1;
3340 }
3341 spin_unlock_irqrestore(&h->devlock, flags);
3342 return 0;
Scott Teel54b6e9e2014-02-18 13:56:45 -06003343}
Stephen Cameron41ce4c32015-04-23 09:31:47 -05003344
Scott Teel54b6e9e2014-02-18 13:56:45 -06003345/*
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003346 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
3347 * logdev. The number of luns in physdev and logdev are returned in
3348 * *nphysicals and *nlogicals, respectively.
3349 * Returns 0 on success, -1 otherwise.
3350 */
3351static int hpsa_gather_lun_info(struct ctlr_info *h,
Don Brace03383732015-01-23 16:43:30 -06003352 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003353 struct ReportLUNdata *logdev, u32 *nlogicals)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003354{
Don Brace03383732015-01-23 16:43:30 -06003355 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003356 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3357 return -1;
3358 }
Don Brace03383732015-01-23 16:43:30 -06003359 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003360 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
Don Brace03383732015-01-23 16:43:30 -06003361 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3362 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003363 *nphysicals = HPSA_MAX_PHYS_LUN;
3364 }
Don Brace03383732015-01-23 16:43:30 -06003365 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003366 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3367 return -1;
3368 }
Stephen M. Cameron6df1e952010-02-04 08:42:19 -06003369 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003370 /* Reject Logicals in excess of our max capability. */
3371 if (*nlogicals > HPSA_MAX_LUN) {
3372 dev_warn(&h->pdev->dev,
3373 "maximum logical LUNs (%d) exceeded. "
3374 "%d LUNs ignored.\n", HPSA_MAX_LUN,
3375 *nlogicals - HPSA_MAX_LUN);
3376 *nlogicals = HPSA_MAX_LUN;
3377 }
3378 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3379 dev_warn(&h->pdev->dev,
3380 "maximum logical + physical LUNs (%d) exceeded. "
3381 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3382 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3383 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
3384 }
3385 return 0;
3386}
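
/*
 * Illustrative arithmetic for the divisors above: LUNListLength is a
 * big-endian byte count, so a 72-byte extended physical LUN list holds
 * 72 / 24 = 3 entries (24 bytes per extended entry), and a 24-byte
 * logical LUN list holds 24 / 8 = 3 entries (8 bytes per lunid).
 */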
3387
Don Brace42a91642014-11-14 17:26:27 -06003388static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3389 int i, int nphysicals, int nlogicals,
Matt Gatesa93aa1f2014-02-18 13:55:07 -06003390 struct ReportExtendedLUNdata *physdev_list,
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003391 struct ReportLUNdata *logdev_list)
3392{
3393 /* Helper function, figure out where the LUN ID info is coming from
3394 * given index i, lists of physical and logical devices, where in
3395 * the list the raid controller is supposed to appear (first or last)
3396 */
3397
3398 int logicals_start = nphysicals + (raid_ctlr_position == 0);
3399 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3400
3401 if (i == raid_ctlr_position)
3402 return RAID_CTLR_LUNID;
3403
3404 if (i < logicals_start)
Stephen M. Camerond5b5d962014-05-29 10:53:34 -05003405 return &physdev_list->LUN[i -
3406 (raid_ctlr_position == 0)].lunid[0];
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003407
3408 if (i < last_device)
3409 return &logdev_list->LUN[i - nphysicals -
3410 (raid_ctlr_position == 0)][0];
3411 BUG();
3412 return NULL;
3413}
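
/*
 * Illustrative layout for the index math above, assuming
 * raid_ctlr_position == 0 (a rev-5 controller) with nphysicals == 2 and
 * nlogicals == 2: index 0 is the controller itself (RAID_CTLR_LUNID),
 * indices 1-2 map to physdev_list->LUN[0..1], and indices 3-4 map to
 * logdev_list->LUN[0..1]. On other controllers the controller instead
 * occupies the final index.
 */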
3414
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003415static int hpsa_hba_mode_enabled(struct ctlr_info *h)
3416{
3417 int rc;
Joe Handzik6e8e8082014-05-15 15:44:42 -05003418 int hba_mode_enabled;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003419 struct bmic_controller_parameters *ctlr_params;
3420 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
3421 GFP_KERNEL);
3422
3423 if (!ctlr_params)
Joe Handzik96444fb2014-05-15 15:44:47 -05003424 return -ENOMEM;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003425 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3426 sizeof(struct bmic_controller_parameters));
Joe Handzik96444fb2014-05-15 15:44:47 -05003427 if (rc) {
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003428 kfree(ctlr_params);
Joe Handzik96444fb2014-05-15 15:44:47 -05003429 return rc;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003430 }
Joe Handzik6e8e8082014-05-15 15:44:42 -05003431
3432 hba_mode_enabled =
3433 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3434 kfree(ctlr_params);
3435 return hba_mode_enabled;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003436}
3437
Don Brace03383732015-01-23 16:43:30 -06003438/* get physical drive ioaccel handle and queue depth */
3439static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3440 struct hpsa_scsi_dev_t *dev,
3441 u8 *lunaddrbytes,
3442 struct bmic_identify_physical_device *id_phys)
3443{
3444 int rc;
3445 struct ext_report_lun_entry *rle =
3446 (struct ext_report_lun_entry *) lunaddrbytes;
3447
3448 dev->ioaccel_handle = rle->ioaccel_handle;
Joe Handzika3144e02015-04-23 09:32:59 -05003449 if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
3450 dev->hba_ioaccel_enabled = 1;
Don Brace03383732015-01-23 16:43:30 -06003451 memset(id_phys, 0, sizeof(*id_phys));
3452 rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3453 GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3454 sizeof(*id_phys));
3455 if (!rc)
3456 /* Reserve space for FW operations */
3457#define DRIVE_CMDS_RESERVED_FOR_FW 2
3458#define DRIVE_QUEUE_DEPTH 7
3459 dev->queue_depth =
3460 le16_to_cpu(id_phys->current_queue_depth_limit) -
3461 DRIVE_CMDS_RESERVED_FOR_FW;
3462 else
3463 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3464 atomic_set(&dev->ioaccel_cmds_out, 0);
3465}
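
/*
 * Worked example (illustrative) for the math above: a drive reporting
 * current_queue_depth_limit == 32 ends up with dev->queue_depth ==
 * 32 - DRIVE_CMDS_RESERVED_FOR_FW == 30; if the BMIC identify fails, the
 * conservative DRIVE_QUEUE_DEPTH default of 7 is used instead.
 */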
3466
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003467static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3468{
3469 /* the idea here is we could get notified
3470 * that some devices have changed, so we do a report
3471 * physical luns and report logical luns cmd, and adjust
3472 * our list of devices accordingly.
3473 *
3474 * The scsi3addr's of devices won't change so long as the
3475 * adapter is not reset. That means we can rescan and
3476 * tell which devices we already know about, vs. new
3477 * devices, vs. disappearing devices.
3478 */
Matt Gatesa93aa1f2014-02-18 13:55:07 -06003479 struct ReportExtendedLUNdata *physdev_list = NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003480 struct ReportLUNdata *logdev_list = NULL;
Don Brace03383732015-01-23 16:43:30 -06003481 struct bmic_identify_physical_device *id_phys = NULL;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003482 u32 nphysicals = 0;
3483 u32 nlogicals = 0;
3484 u32 ndev_allocated = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003485 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3486 int ncurrent = 0;
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003487 int i, n_ext_target_devs, ndevs_to_allocate;
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003488 int raid_ctlr_position;
Joe Handzik2bbf5c72014-05-21 11:16:01 -05003489 int rescan_hba_mode;
Scott Teelaca4a522012-01-19 14:01:19 -06003490 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003491
Scott Teelcfe5bad2011-10-26 16:21:07 -05003492 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
Stephen M. Cameron92084712014-11-14 17:26:54 -06003493 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3494 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003495 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
Don Brace03383732015-01-23 16:43:30 -06003496 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003497
Don Brace03383732015-01-23 16:43:30 -06003498 if (!currentsd || !physdev_list || !logdev_list ||
3499 !tmpdevice || !id_phys) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003500 dev_err(&h->pdev->dev, "out of memory\n");
3501 goto out;
3502 }
3503 memset(lunzerobits, 0, sizeof(lunzerobits));
3504
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003505 rescan_hba_mode = hpsa_hba_mode_enabled(h);
Joe Handzik96444fb2014-05-15 15:44:47 -05003506 if (rescan_hba_mode < 0)
3507 goto out;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003508
3509 if (!h->hba_mode_enabled && rescan_hba_mode)
3510 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3511 else if (h->hba_mode_enabled && !rescan_hba_mode)
3512 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3513
3514 h->hba_mode_enabled = rescan_hba_mode;
3515
Don Brace03383732015-01-23 16:43:30 -06003516 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3517 logdev_list, &nlogicals))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003518 goto out;
3519
Scott Teelaca4a522012-01-19 14:01:19 -06003520 /* We might see up to the maximum number of logical and physical disks
3521 * plus external target devices, and a device for the local RAID
3522 * controller.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003523 */
Scott Teelaca4a522012-01-19 14:01:19 -06003524 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003525
3526 /* Allocate the per device structures */
3527 for (i = 0; i < ndevs_to_allocate; i++) {
Scott Teelb7ec0212011-10-26 16:21:12 -05003528 if (i >= HPSA_MAX_DEVICES) {
3529 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3530 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3531 ndevs_to_allocate - HPSA_MAX_DEVICES);
3532 break;
3533 }
3534
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003535 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3536 if (!currentsd[i]) {
3537 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3538 __FILE__, __LINE__);
3539 goto out;
3540 }
3541 ndev_allocated++;
3542 }
3543
Stephen M. Cameron86452912014-05-29 10:53:49 -05003544 if (is_scsi_rev_5(h))
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003545 raid_ctlr_position = 0;
3546 else
3547 raid_ctlr_position = nphysicals + nlogicals;
3548
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003549 /* adjust our table of devices */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003550 n_ext_target_devs = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003551 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003552 u8 *lunaddrbytes, is_OBDR = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003553
3554 /* Figure out where the LUN ID info is coming from */
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003555 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3556 i, nphysicals, nlogicals, physdev_list, logdev_list);
Stephen Cameron41ce4c32015-04-23 09:31:47 -05003557
3558 /* skip masked non-disk devices */
3559 if (MASKED_DEVICE(lunaddrbytes))
3560 if (i < nphysicals + (raid_ctlr_position == 0) &&
3561 NON_DISK_PHYS_DEV(lunaddrbytes))
3562 continue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003563
3564 /* Get device type, vendor, model, device id */
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003565 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3566 &is_OBDR))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003567 continue; /* skip it if we can't talk to it. */
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003568 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05003569 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003570 this_device = currentsd[ncurrent];
3571
3572 /*
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003573 * For external target devices, we have to insert a LUN 0 which
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003574 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3575 * is nonetheless an enclosure device there. We have to
3576 * present that; otherwise, linux won't find anything if
3577 * there is no lun 0.
3578 */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003579 if (add_ext_target_dev(h, tmpdevice, this_device,
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003580 lunaddrbytes, lunzerobits,
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003581 &n_ext_target_devs)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003582 ncurrent++;
3583 this_device = currentsd[ncurrent];
3584 }
3585
3586 *this_device = *tmpdevice;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003587
Stephen Cameron41ce4c32015-04-23 09:31:47 -05003588 /* do not expose masked devices */
3589 if (MASKED_DEVICE(lunaddrbytes) &&
3590 i < nphysicals + (raid_ctlr_position == 0)) {
3591 if (h->hba_mode_enabled)
3592 dev_warn(&h->pdev->dev,
3593 "Masked physical device detected\n");
3594 this_device->expose_state = HPSA_DO_NOT_EXPOSE;
3595 } else {
3596 this_device->expose_state =
3597 HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
3598 }
3599
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003600 switch (this_device->devtype) {
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003601 case TYPE_ROM:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003602 /* We don't *really* support actual CD-ROM devices,
3603 * just "One Button Disaster Recovery" tape drive
3604 * which temporarily pretends to be a CD-ROM drive.
3605 * So we check that the device is really an OBDR tape
3606 * device by checking for "$DR-10" in bytes 43-48 of
3607 * the inquiry data.
3608 */
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003609 if (is_OBDR)
3610 ncurrent++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003611 break;
3612 case TYPE_DISK:
Joe Handzikecf418d12015-04-23 09:33:04 -05003613 if (i >= nphysicals) {
3614 ncurrent++;
3615 break;
3616 }
3617
3618 if (h->hba_mode_enabled)
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003619 /* never use raid mapper in HBA mode */
3620 this_device->offload_enabled = 0;
Joe Handzikecf418d12015-04-23 09:33:04 -05003621 else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
3622 h->transMethod & CFGTBL_Trans_io_accel2))
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003623 break;
Joe Handzikecf418d12015-04-23 09:33:04 -05003624
3625 hpsa_get_ioaccel_drive_info(h, this_device,
3626 lunaddrbytes, id_phys);
3627 atomic_set(&this_device->ioaccel_cmds_out, 0);
3628 ncurrent++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003629 break;
3630 case TYPE_TAPE:
3631 case TYPE_MEDIUM_CHANGER:
3632 ncurrent++;
3633 break;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05003634 case TYPE_ENCLOSURE:
3635 if (h->hba_mode_enabled)
3636 ncurrent++;
3637 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003638 case TYPE_RAID:
3639 /* Only present the Smartarray HBA as a RAID controller.
3640 * If it's a RAID controller other than the HBA itself
3641 * (an external RAID controller, MSA500 or similar)
3642 * don't present it.
3643 */
3644 if (!is_hba_lunid(lunaddrbytes))
3645 break;
3646 ncurrent++;
3647 break;
3648 default:
3649 break;
3650 }
Scott Teelcfe5bad2011-10-26 16:21:07 -05003651 if (ncurrent >= HPSA_MAX_DEVICES)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003652 break;
3653 }
3654 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3655out:
3656 kfree(tmpdevice);
3657 for (i = 0; i < ndev_allocated; i++)
3658 kfree(currentsd[i]);
3659 kfree(currentsd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003660 kfree(physdev_list);
3661 kfree(logdev_list);
Don Brace03383732015-01-23 16:43:30 -06003662 kfree(id_phys);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003663}
3664
Webb Scalesec5cbf02015-01-23 16:44:45 -06003665static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
3666 struct scatterlist *sg)
3667{
3668 u64 addr64 = (u64) sg_dma_address(sg);
3669 unsigned int len = sg_dma_len(sg);
3670
3671 desc->Addr = cpu_to_le64(addr64);
3672 desc->Len = cpu_to_le32(len);
3673 desc->Ext = 0;
3674}
3675
Webb Scalesc7ee65b2015-01-23 16:42:17 -06003676/*
3677 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003678 * dma mapping and fills in the scatter gather entries of the
3679 * hpsa command, cp.
3680 */
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003681static int hpsa_scatter_gather(struct ctlr_info *h,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003682 struct CommandList *cp,
3683 struct scsi_cmnd *cmd)
3684{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003685 struct scatterlist *sg;
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003686 int use_sg, i, sg_limit, chained, last_sg;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003687 struct SGDescriptor *curr_sg;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003688
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003689 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003690
3691 use_sg = scsi_dma_map(cmd);
3692 if (use_sg < 0)
3693 return use_sg;
3694
3695 if (!use_sg)
3696 goto sglist_finished;
3697
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003698 /*
3699 * If the number of entries is greater than the max for a single list,
3700 * then we have a chained list; we will set up all but one entry in the
3701 * first list (the last entry is saved for link information);
3702 * otherwise, we don't have a chained list and we'll set up at each of
3703 * the entries in the one list.
3704 */
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003705 curr_sg = cp->SG;
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003706 chained = use_sg > h->max_cmd_sg_entries;
3707 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
3708 last_sg = scsi_sg_count(cmd) - 1;
3709 scsi_for_each_sg(cmd, sg, sg_limit, i) {
Webb Scalesec5cbf02015-01-23 16:44:45 -06003710 hpsa_set_sg_descriptor(curr_sg, sg);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003711 curr_sg++;
3712 }
Webb Scalesec5cbf02015-01-23 16:44:45 -06003713
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003714 if (chained) {
3715 /*
3716 * Continue with the chained list. Set curr_sg to the chained
3717 * list. Modify the limit to the total count less the entries
3718 * we've already set up. Resume the scan at the list entry
3719 * where the previous loop left off.
3720 */
3721 curr_sg = h->cmd_sg_list[cp->cmdindex];
3722 sg_limit = use_sg - sg_limit;
3723 for_each_sg(sg, sg, sg_limit, i) {
3724 hpsa_set_sg_descriptor(curr_sg, sg);
3725 curr_sg++;
3726 }
3727 }
3728
Webb Scalesec5cbf02015-01-23 16:44:45 -06003729 /* Back the pointer up to the last entry and mark it as "last". */
Webb Scalesb3a7ba72015-04-23 09:34:27 -05003730 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003731
3732 if (use_sg + chained > h->maxSG)
3733 h->maxSG = use_sg + chained;
3734
3735 if (chained) {
3736 cp->Header.SGList = h->max_cmd_sg_entries;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003737 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06003738 if (hpsa_map_sg_chain_block(h, cp)) {
3739 scsi_dma_unmap(cmd);
3740 return -1;
3741 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003742 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003743 }
3744
3745sglist_finished:
3746
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003747 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
Webb Scalesc7ee65b2015-01-23 16:42:17 -06003748 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003749 return 0;
3750}
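
/*
 * Worked example (illustrative) of the chaining above: with
 * h->max_cmd_sg_entries == 32 and use_sg == 40, the first loop fills 31
 * embedded descriptors (the 32nd slot carries the chain pointer), the
 * second loop places the remaining 40 - 31 = 9 descriptors in the chain
 * block, the last of which is marked HPSA_SG_LAST, and Header.SGTotal is
 * set to 40 + 1 to count the chain entry itself.
 */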
3751
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003752#define IO_ACCEL_INELIGIBLE (1)
3753static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3754{
3755 int is_write = 0;
3756 u32 block;
3757 u32 block_cnt;
3758
3759 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3760 switch (cdb[0]) {
3761 case WRITE_6:
3762 case WRITE_12:
3763 is_write = 1;
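		/* fall through - reads and writes share the rewrite below */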
3764 case READ_6:
3765 case READ_12:
3766 if (*cdb_len == 6) {
3767 block = (((u32) cdb[2]) << 8) | cdb[3];
3768 block_cnt = cdb[4];
3769 } else {
3770 BUG_ON(*cdb_len != 12);
3771 block = (((u32) cdb[2]) << 24) |
3772 (((u32) cdb[3]) << 16) |
3773 (((u32) cdb[4]) << 8) |
3774 cdb[5];
3775 block_cnt =
3776 (((u32) cdb[6]) << 24) |
3777 (((u32) cdb[7]) << 16) |
3778 (((u32) cdb[8]) << 8) |
3779 cdb[9];
3780 }
3781 if (block_cnt > 0xffff)
3782 return IO_ACCEL_INELIGIBLE;
3783
3784 cdb[0] = is_write ? WRITE_10 : READ_10;
3785 cdb[1] = 0;
3786 cdb[2] = (u8) (block >> 24);
3787 cdb[3] = (u8) (block >> 16);
3788 cdb[4] = (u8) (block >> 8);
3789 cdb[5] = (u8) (block);
3790 cdb[6] = 0;
3791 cdb[7] = (u8) (block_cnt >> 8);
3792 cdb[8] = (u8) (block_cnt);
3793 cdb[9] = 0;
3794 *cdb_len = 10;
3795 break;
3796 }
3797 return 0;
3798}
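
/*
 * Worked example (illustrative) of the rewrite above: a 6-byte read of 16
 * blocks, CDB = { READ_6, 0x00, 0x12, 0x34, 0x10, 0x00 }, has block ==
 * 0x1234 and block_cnt == 0x10 extracted as coded above, and leaves as the
 * 10-byte CDB { READ_10, 0x00, 0x00, 0x00, 0x12, 0x34, 0x00, 0x00, 0x10,
 * 0x00 } with *cdb_len updated to 10.
 */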
3799
Scott Teelc3497752014-02-18 13:56:34 -06003800static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003801 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
Don Brace03383732015-01-23 16:43:30 -06003802 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
Matt Gatese1f7de02014-02-18 13:55:17 -06003803{
3804 struct scsi_cmnd *cmd = c->scsi_cmd;
Matt Gatese1f7de02014-02-18 13:55:17 -06003805 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3806 unsigned int len;
3807 unsigned int total_len = 0;
3808 struct scatterlist *sg;
3809 u64 addr64;
3810 int use_sg, i;
3811 struct SGDescriptor *curr_sg;
3812 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3813
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003814 /* TODO: implement chaining support */
Don Brace03383732015-01-23 16:43:30 -06003815 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3816 atomic_dec(&phys_disk->ioaccel_cmds_out);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003817 return IO_ACCEL_INELIGIBLE;
Don Brace03383732015-01-23 16:43:30 -06003818 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003819
Matt Gatese1f7de02014-02-18 13:55:17 -06003820 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3821
Don Brace03383732015-01-23 16:43:30 -06003822 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3823 atomic_dec(&phys_disk->ioaccel_cmds_out);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003824 return IO_ACCEL_INELIGIBLE;
Don Brace03383732015-01-23 16:43:30 -06003825 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003826
Matt Gatese1f7de02014-02-18 13:55:17 -06003827 c->cmd_type = CMD_IOACCEL1;
3828
3829 /* Adjust the DMA address to point to the accelerated command buffer */
3830 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3831 (c->cmdindex * sizeof(*cp));
3832 BUG_ON(c->busaddr & 0x0000007F);
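	/*
	 * The 0x7F mask above checks 128-byte alignment: each io_accel1_cmd
	 * in the pool is expected to be 128-byte aligned, so the low seven
	 * bits of the accelerated command's bus address must be zero.
	 */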
3833
3834 use_sg = scsi_dma_map(cmd);
Don Brace03383732015-01-23 16:43:30 -06003835 if (use_sg < 0) {
3836 atomic_dec(&phys_disk->ioaccel_cmds_out);
Matt Gatese1f7de02014-02-18 13:55:17 -06003837 return use_sg;
Don Brace03383732015-01-23 16:43:30 -06003838 }
Matt Gatese1f7de02014-02-18 13:55:17 -06003839
3840 if (use_sg) {
3841 curr_sg = cp->SG;
3842 scsi_for_each_sg(cmd, sg, use_sg, i) {
3843 addr64 = (u64) sg_dma_address(sg);
3844 len = sg_dma_len(sg);
3845 total_len += len;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003846 curr_sg->Addr = cpu_to_le64(addr64);
3847 curr_sg->Len = cpu_to_le32(len);
3848 curr_sg->Ext = cpu_to_le32(0);
Matt Gatese1f7de02014-02-18 13:55:17 -06003849 curr_sg++;
3850 }
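		/* Mark the final descriptor so the firmware knows where the list ends. */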
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003851 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
Matt Gatese1f7de02014-02-18 13:55:17 -06003852
3853 switch (cmd->sc_data_direction) {
3854 case DMA_TO_DEVICE:
3855 control |= IOACCEL1_CONTROL_DATA_OUT;
3856 break;
3857 case DMA_FROM_DEVICE:
3858 control |= IOACCEL1_CONTROL_DATA_IN;
3859 break;
3860 case DMA_NONE:
3861 control |= IOACCEL1_CONTROL_NODATAXFER;
3862 break;
3863 default:
3864 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3865 cmd->sc_data_direction);
3866 BUG();
3867 break;
3868 }
3869 } else {
3870 control |= IOACCEL1_CONTROL_NODATAXFER;
3871 }
3872
Scott Teelc3497752014-02-18 13:56:34 -06003873 c->Header.SGList = use_sg;
Matt Gatese1f7de02014-02-18 13:55:17 -06003874 /* Fill out the command structure to submit */
Don Brace2b08b3e2015-01-23 16:41:09 -06003875 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3876 cp->transfer_len = cpu_to_le32(total_len);
3877 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3878 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
3879 cp->control = cpu_to_le32(control);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003880 memcpy(cp->CDB, cdb, cdb_len);
3881 memcpy(cp->CISS_LUN, scsi3addr, 8);
Scott Teelc3497752014-02-18 13:56:34 -06003882 /* Tag was already set at init time. */
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003883 enqueue_cmd_and_start_io(h, c);
Matt Gatese1f7de02014-02-18 13:55:17 -06003884 return 0;
3885}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003886
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003887/*
3888 * Queue a command directly to a device behind the controller using the
3889 * I/O accelerator path.
3890 */
3891static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3892 struct CommandList *c)
3893{
3894 struct scsi_cmnd *cmd = c->scsi_cmd;
3895 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3896
Don Brace03383732015-01-23 16:43:30 -06003897 c->phys_disk = dev;
3898
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003899 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
Don Brace03383732015-01-23 16:43:30 -06003900 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003901}
3902
Scott Teeldd0e19f2014-02-18 13:57:31 -06003903/*
3904 * Set encryption parameters for the ioaccel2 request
3905 */
3906static void set_encrypt_ioaccel2(struct ctlr_info *h,
3907 struct CommandList *c, struct io_accel2_cmd *cp)
3908{
3909 struct scsi_cmnd *cmd = c->scsi_cmd;
3910 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3911 struct raid_map_data *map = &dev->raid_map;
3912 u64 first_block;
3913
	/* Are we doing encryption on this device? */
Don Brace2b08b3e2015-01-23 16:41:09 -06003915 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
Scott Teeldd0e19f2014-02-18 13:57:31 -06003916 return;
3917 /* Set the data encryption key index. */
3918 cp->dekindex = map->dekindex;
3919
3920 /* Set the encryption enable flag, encoded into direction field. */
3921 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3922
3923 /* Set encryption tweak values based on logical block address
3924 * If block size is 512, tweak value is LBA.
3925 * For other block sizes, tweak is (LBA * block size)/ 512)
3926 */
3927 switch (cmd->cmnd[0]) {
3928 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3929 case WRITE_6:
3930 case READ_6:
Don Brace2b08b3e2015-01-23 16:41:09 -06003931 first_block = get_unaligned_be16(&cmd->cmnd[2]);
Scott Teeldd0e19f2014-02-18 13:57:31 -06003932 break;
3933 case WRITE_10:
3934 case READ_10:
Scott Teeldd0e19f2014-02-18 13:57:31 -06003935 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3936 case WRITE_12:
3937 case READ_12:
Don Brace2b08b3e2015-01-23 16:41:09 -06003938 first_block = get_unaligned_be32(&cmd->cmnd[2]);
Scott Teeldd0e19f2014-02-18 13:57:31 -06003939 break;
3940 case WRITE_16:
3941 case READ_16:
Don Brace2b08b3e2015-01-23 16:41:09 -06003942 first_block = get_unaligned_be64(&cmd->cmnd[2]);
Scott Teeldd0e19f2014-02-18 13:57:31 -06003943 break;
3944 default:
3945 dev_err(&h->pdev->dev,
Don Brace2b08b3e2015-01-23 16:41:09 -06003946 "ERROR: %s: size (0x%x) not supported for encryption\n",
3947 __func__, cmd->cmnd[0]);
Scott Teeldd0e19f2014-02-18 13:57:31 -06003948 BUG();
3949 break;
3950 }
Don Brace2b08b3e2015-01-23 16:41:09 -06003951
3952 if (le32_to_cpu(map->volume_blk_size) != 512)
3953 first_block = first_block *
3954 le32_to_cpu(map->volume_blk_size)/512;
3955
3956 cp->tweak_lower = cpu_to_le32(first_block);
3957 cp->tweak_upper = cpu_to_le32(first_block >> 32);
Scott Teeldd0e19f2014-02-18 13:57:31 -06003958}
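
/*
 * Tweak example (illustrative only): on a volume with 4096-byte blocks, an
 * I/O starting at LBA 10 yields first_block = 10 * 4096 / 512 = 80, so
 * tweak_lower is 80 and tweak_upper is 0.
 */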
3959
Scott Teelc3497752014-02-18 13:56:34 -06003960static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3961 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
Don Brace03383732015-01-23 16:43:30 -06003962 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
Scott Teelc3497752014-02-18 13:56:34 -06003963{
3964 struct scsi_cmnd *cmd = c->scsi_cmd;
3965 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3966 struct ioaccel2_sg_element *curr_sg;
3967 int use_sg, i;
3968 struct scatterlist *sg;
3969 u64 addr64;
3970 u32 len;
3971 u32 total_len = 0;
3972
Webb Scalesd9a729f2015-04-23 09:33:27 -05003973 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
Scott Teelc3497752014-02-18 13:56:34 -06003974
Don Brace03383732015-01-23 16:43:30 -06003975 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3976 atomic_dec(&phys_disk->ioaccel_cmds_out);
Scott Teelc3497752014-02-18 13:56:34 -06003977 return IO_ACCEL_INELIGIBLE;
Don Brace03383732015-01-23 16:43:30 -06003978 }
3979
Scott Teelc3497752014-02-18 13:56:34 -06003980 c->cmd_type = CMD_IOACCEL2;
3981 /* Adjust the DMA address to point to the accelerated command buffer */
3982 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3983 (c->cmdindex * sizeof(*cp));
3984 BUG_ON(c->busaddr & 0x0000007F);
3985
3986 memset(cp, 0, sizeof(*cp));
3987 cp->IU_type = IOACCEL2_IU_TYPE;
3988
3989 use_sg = scsi_dma_map(cmd);
Don Brace03383732015-01-23 16:43:30 -06003990 if (use_sg < 0) {
3991 atomic_dec(&phys_disk->ioaccel_cmds_out);
Scott Teelc3497752014-02-18 13:56:34 -06003992 return use_sg;
Don Brace03383732015-01-23 16:43:30 -06003993 }
Scott Teelc3497752014-02-18 13:56:34 -06003994
3995 if (use_sg) {
Scott Teelc3497752014-02-18 13:56:34 -06003996 curr_sg = cp->sg;
Webb Scalesd9a729f2015-04-23 09:33:27 -05003997 if (use_sg > h->ioaccel_maxsg) {
3998 addr64 = le64_to_cpu(
3999 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4000 curr_sg->address = cpu_to_le64(addr64);
4001 curr_sg->length = 0;
4002 curr_sg->reserved[0] = 0;
4003 curr_sg->reserved[1] = 0;
4004 curr_sg->reserved[2] = 0;
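			/*
			 * 0x80 appears to flag this descriptor as a chain
			 * pointer to the separate SG list rather than a data
			 * segment (assumed from the ioaccel2 SG format).
			 */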
4005 curr_sg->chain_indicator = 0x80;
4006
4007 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4008 }
Scott Teelc3497752014-02-18 13:56:34 -06004009 scsi_for_each_sg(cmd, sg, use_sg, i) {
4010 addr64 = (u64) sg_dma_address(sg);
4011 len = sg_dma_len(sg);
4012 total_len += len;
4013 curr_sg->address = cpu_to_le64(addr64);
4014 curr_sg->length = cpu_to_le32(len);
4015 curr_sg->reserved[0] = 0;
4016 curr_sg->reserved[1] = 0;
4017 curr_sg->reserved[2] = 0;
4018 curr_sg->chain_indicator = 0;
4019 curr_sg++;
4020 }
4021
4022 switch (cmd->sc_data_direction) {
4023 case DMA_TO_DEVICE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06004024 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4025 cp->direction |= IOACCEL2_DIR_DATA_OUT;
Scott Teelc3497752014-02-18 13:56:34 -06004026 break;
4027 case DMA_FROM_DEVICE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06004028 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4029 cp->direction |= IOACCEL2_DIR_DATA_IN;
Scott Teelc3497752014-02-18 13:56:34 -06004030 break;
4031 case DMA_NONE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06004032 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4033 cp->direction |= IOACCEL2_DIR_NO_DATA;
Scott Teelc3497752014-02-18 13:56:34 -06004034 break;
4035 default:
4036 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4037 cmd->sc_data_direction);
4038 BUG();
4039 break;
4040 }
4041 } else {
Scott Teeldd0e19f2014-02-18 13:57:31 -06004042 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4043 cp->direction |= IOACCEL2_DIR_NO_DATA;
Scott Teelc3497752014-02-18 13:56:34 -06004044 }
Scott Teeldd0e19f2014-02-18 13:57:31 -06004045
4046 /* Set encryption parameters, if necessary */
4047 set_encrypt_ioaccel2(h, c, cp);
4048
Don Brace2b08b3e2015-01-23 16:41:09 -06004049 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
Don Bracef2405db2015-01-23 16:43:09 -06004050 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
Scott Teelc3497752014-02-18 13:56:34 -06004051 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
Scott Teelc3497752014-02-18 13:56:34 -06004052
Scott Teelc3497752014-02-18 13:56:34 -06004053 cp->data_len = cpu_to_le32(total_len);
4054 cp->err_ptr = cpu_to_le64(c->busaddr +
4055 offsetof(struct io_accel2_cmd, error_data));
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004056 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
Scott Teelc3497752014-02-18 13:56:34 -06004057
Webb Scalesd9a729f2015-04-23 09:33:27 -05004058 /* fill in sg elements */
4059 if (use_sg > h->ioaccel_maxsg) {
4060 cp->sg_count = 1;
4061 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4062 atomic_dec(&phys_disk->ioaccel_cmds_out);
4063 scsi_dma_unmap(cmd);
4064 return -1;
4065 }
4066 } else
4067 cp->sg_count = (u8) use_sg;
4068
Scott Teelc3497752014-02-18 13:56:34 -06004069 enqueue_cmd_and_start_io(h, c);
4070 return 0;
4071}
4072
4073/*
4074 * Queue a command to the correct I/O accelerator path.
4075 */
4076static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4077 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
Don Brace03383732015-01-23 16:43:30 -06004078 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
Scott Teelc3497752014-02-18 13:56:34 -06004079{
Don Brace03383732015-01-23 16:43:30 -06004080 /* Try to honor the device's queue depth */
4081 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4082 phys_disk->queue_depth) {
4083 atomic_dec(&phys_disk->ioaccel_cmds_out);
4084 return IO_ACCEL_INELIGIBLE;
4085 }
Scott Teelc3497752014-02-18 13:56:34 -06004086 if (h->transMethod & CFGTBL_Trans_io_accel1)
4087 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
Don Brace03383732015-01-23 16:43:30 -06004088 cdb, cdb_len, scsi3addr,
4089 phys_disk);
Scott Teelc3497752014-02-18 13:56:34 -06004090 else
4091 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
Don Brace03383732015-01-23 16:43:30 -06004092 cdb, cdb_len, scsi3addr,
4093 phys_disk);
Scott Teelc3497752014-02-18 13:56:34 -06004094}
4095
Scott Teel6b80b182014-02-18 13:56:55 -06004096static void raid_map_helper(struct raid_map_data *map,
4097 int offload_to_mirror, u32 *map_index, u32 *current_group)
4098{
4099 if (offload_to_mirror == 0) {
4100 /* use physical disk in the first mirrored group. */
Don Brace2b08b3e2015-01-23 16:41:09 -06004101 *map_index %= le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004102 return;
4103 }
4104 do {
4105 /* determine mirror group that *map_index indicates */
Don Brace2b08b3e2015-01-23 16:41:09 -06004106 *current_group = *map_index /
4107 le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004108 if (offload_to_mirror == *current_group)
4109 continue;
Don Brace2b08b3e2015-01-23 16:41:09 -06004110 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
Scott Teel6b80b182014-02-18 13:56:55 -06004111 /* select map index from next group */
Don Brace2b08b3e2015-01-23 16:41:09 -06004112 *map_index += le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004113 (*current_group)++;
4114 } else {
4115 /* select map index from first group */
Don Brace2b08b3e2015-01-23 16:41:09 -06004116 *map_index %= le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004117 *current_group = 0;
4118 }
4119 } while (offload_to_mirror != *current_group);
4120}
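
/*
 * Mirror-selection example (illustrative only): with layout_map_count = 3
 * (triple mirror), data_disks_per_row = 4, and offload_to_mirror = 2, an
 * incoming map_index of 1 walks groups 0 -> 1 -> 2 and returns
 * map_index = 9, i.e. the same column's copy in the third mirror group.
 */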
4121
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004122/*
4123 * Attempt to perform offload RAID mapping for a logical volume I/O.
4124 */
4125static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4126 struct CommandList *c)
4127{
4128 struct scsi_cmnd *cmd = c->scsi_cmd;
4129 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4130 struct raid_map_data *map = &dev->raid_map;
4131 struct raid_map_disk_data *dd = &map->data[0];
4132 int is_write = 0;
4133 u32 map_index;
4134 u64 first_block, last_block;
4135 u32 block_cnt;
4136 u32 blocks_per_row;
4137 u64 first_row, last_row;
4138 u32 first_row_offset, last_row_offset;
4139 u32 first_column, last_column;
Scott Teel6b80b182014-02-18 13:56:55 -06004140 u64 r0_first_row, r0_last_row;
4141 u32 r5or6_blocks_per_row;
4142 u64 r5or6_first_row, r5or6_last_row;
4143 u32 r5or6_first_row_offset, r5or6_last_row_offset;
4144 u32 r5or6_first_column, r5or6_last_column;
4145 u32 total_disks_per_row;
4146 u32 stripesize;
4147 u32 first_group, last_group, current_group;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004148 u32 map_row;
4149 u32 disk_handle;
4150 u64 disk_block;
4151 u32 disk_block_cnt;
4152 u8 cdb[16];
4153 u8 cdb_len;
Don Brace2b08b3e2015-01-23 16:41:09 -06004154 u16 strip_size;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004155#if BITS_PER_LONG == 32
4156 u64 tmpdiv;
4157#endif
Scott Teel6b80b182014-02-18 13:56:55 -06004158 int offload_to_mirror;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004159
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004160 /* check for valid opcode, get LBA and block count */
4161 switch (cmd->cmnd[0]) {
4162 case WRITE_6:
		is_write = 1;
		/* fall through */
4164 case READ_6:
4165 first_block =
4166 (((u64) cmd->cmnd[2]) << 8) |
4167 cmd->cmnd[3];
4168 block_cnt = cmd->cmnd[4];
Stephen M. Cameron3fa89a02014-07-03 10:18:14 -05004169 if (block_cnt == 0)
4170 block_cnt = 256;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004171 break;
4172 case WRITE_10:
4173 is_write = 1;
4174 case READ_10:
4175 first_block =
4176 (((u64) cmd->cmnd[2]) << 24) |
4177 (((u64) cmd->cmnd[3]) << 16) |
4178 (((u64) cmd->cmnd[4]) << 8) |
4179 cmd->cmnd[5];
4180 block_cnt =
4181 (((u32) cmd->cmnd[7]) << 8) |
4182 cmd->cmnd[8];
4183 break;
4184 case WRITE_12:
		is_write = 1;
		/* fall through */
4186 case READ_12:
4187 first_block =
4188 (((u64) cmd->cmnd[2]) << 24) |
4189 (((u64) cmd->cmnd[3]) << 16) |
4190 (((u64) cmd->cmnd[4]) << 8) |
4191 cmd->cmnd[5];
4192 block_cnt =
4193 (((u32) cmd->cmnd[6]) << 24) |
4194 (((u32) cmd->cmnd[7]) << 16) |
4195 (((u32) cmd->cmnd[8]) << 8) |
4196 cmd->cmnd[9];
4197 break;
4198 case WRITE_16:
		is_write = 1;
		/* fall through */
4200 case READ_16:
4201 first_block =
4202 (((u64) cmd->cmnd[2]) << 56) |
4203 (((u64) cmd->cmnd[3]) << 48) |
4204 (((u64) cmd->cmnd[4]) << 40) |
4205 (((u64) cmd->cmnd[5]) << 32) |
4206 (((u64) cmd->cmnd[6]) << 24) |
4207 (((u64) cmd->cmnd[7]) << 16) |
4208 (((u64) cmd->cmnd[8]) << 8) |
4209 cmd->cmnd[9];
4210 block_cnt =
4211 (((u32) cmd->cmnd[10]) << 24) |
4212 (((u32) cmd->cmnd[11]) << 16) |
4213 (((u32) cmd->cmnd[12]) << 8) |
4214 cmd->cmnd[13];
4215 break;
4216 default:
4217 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4218 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004219 last_block = first_block + block_cnt - 1;
4220
4221 /* check for write to non-RAID-0 */
4222 if (is_write && dev->raid_level != 0)
4223 return IO_ACCEL_INELIGIBLE;
4224
4225 /* check for invalid block or wraparound */
Don Brace2b08b3e2015-01-23 16:41:09 -06004226 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4227 last_block < first_block)
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004228 return IO_ACCEL_INELIGIBLE;
4229
4230 /* calculate stripe information for the request */
Don Brace2b08b3e2015-01-23 16:41:09 -06004231 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4232 le16_to_cpu(map->strip_size);
4233 strip_size = le16_to_cpu(map->strip_size);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004234#if BITS_PER_LONG == 32
4235 tmpdiv = first_block;
4236 (void) do_div(tmpdiv, blocks_per_row);
4237 first_row = tmpdiv;
4238 tmpdiv = last_block;
4239 (void) do_div(tmpdiv, blocks_per_row);
4240 last_row = tmpdiv;
4241 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4242 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4243 tmpdiv = first_row_offset;
Don Brace2b08b3e2015-01-23 16:41:09 -06004244 (void) do_div(tmpdiv, strip_size);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004245 first_column = tmpdiv;
4246 tmpdiv = last_row_offset;
Don Brace2b08b3e2015-01-23 16:41:09 -06004247 (void) do_div(tmpdiv, strip_size);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004248 last_column = tmpdiv;
4249#else
4250 first_row = first_block / blocks_per_row;
4251 last_row = last_block / blocks_per_row;
4252 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4253 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
Don Brace2b08b3e2015-01-23 16:41:09 -06004254 first_column = first_row_offset / strip_size;
4255 last_column = last_row_offset / strip_size;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004256#endif
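
	/*
	 * Worked example (illustrative only): with strip_size = 256 and
	 * data_disks_per_row = 4, blocks_per_row = 1024.  A 16-block I/O at
	 * first_block = 2500 maps to row 2 (2500 / 1024), row offset 452,
	 * column 1 (452 / 256); last_block = 2515 lands in the same row and
	 * column, so the request remains eligible for the checks below.
	 */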
4257
4258 /* if this isn't a single row/column then give to the controller */
4259 if ((first_row != last_row) || (first_column != last_column))
4260 return IO_ACCEL_INELIGIBLE;
4261
4262 /* proceeding with driver mapping */
Don Brace2b08b3e2015-01-23 16:41:09 -06004263 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4264 le16_to_cpu(map->metadata_disks_per_row);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004265 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
Don Brace2b08b3e2015-01-23 16:41:09 -06004266 le16_to_cpu(map->row_cnt);
Scott Teel6b80b182014-02-18 13:56:55 -06004267 map_index = (map_row * total_disks_per_row) + first_column;
4268
4269 switch (dev->raid_level) {
4270 case HPSA_RAID_0:
4271 break; /* nothing special to do */
4272 case HPSA_RAID_1:
		/*
		 * Handles load balancing across RAID 1 members
		 * (2-drive R1 and R10 with an even number of drives).
		 * Appropriate for SSDs, not optimal for HDDs.
		 */
Don Brace2b08b3e2015-01-23 16:41:09 -06004277 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004278 if (dev->offload_to_mirror)
Don Brace2b08b3e2015-01-23 16:41:09 -06004279 map_index += le16_to_cpu(map->data_disks_per_row);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004280 dev->offload_to_mirror = !dev->offload_to_mirror;
Scott Teel6b80b182014-02-18 13:56:55 -06004281 break;
4282 case HPSA_RAID_ADM:
		/*
		 * Handles N-way mirrors (R1-ADM) and R10 with a number of
		 * drives divisible by 3.
		 */
Don Brace2b08b3e2015-01-23 16:41:09 -06004286 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
Scott Teel6b80b182014-02-18 13:56:55 -06004287
4288 offload_to_mirror = dev->offload_to_mirror;
4289 raid_map_helper(map, offload_to_mirror,
4290 &map_index, &current_group);
4291 /* set mirror group to use next time */
4292 offload_to_mirror =
Don Brace2b08b3e2015-01-23 16:41:09 -06004293 (offload_to_mirror >=
4294 le16_to_cpu(map->layout_map_count) - 1)
Scott Teel6b80b182014-02-18 13:56:55 -06004295 ? 0 : offload_to_mirror + 1;
Scott Teel6b80b182014-02-18 13:56:55 -06004296 dev->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of map->layout_map_count - 1.
		 */
4301 break;
4302 case HPSA_RAID_5:
4303 case HPSA_RAID_6:
Don Brace2b08b3e2015-01-23 16:41:09 -06004304 if (le16_to_cpu(map->layout_map_count) <= 1)
Scott Teel6b80b182014-02-18 13:56:55 -06004305 break;
4306
4307 /* Verify first and last block are in same RAID group */
4308 r5or6_blocks_per_row =
Don Brace2b08b3e2015-01-23 16:41:09 -06004309 le16_to_cpu(map->strip_size) *
4310 le16_to_cpu(map->data_disks_per_row);
Scott Teel6b80b182014-02-18 13:56:55 -06004311 BUG_ON(r5or6_blocks_per_row == 0);
Don Brace2b08b3e2015-01-23 16:41:09 -06004312 stripesize = r5or6_blocks_per_row *
4313 le16_to_cpu(map->layout_map_count);
Scott Teel6b80b182014-02-18 13:56:55 -06004314#if BITS_PER_LONG == 32
4315 tmpdiv = first_block;
4316 first_group = do_div(tmpdiv, stripesize);
4317 tmpdiv = first_group;
4318 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4319 first_group = tmpdiv;
4320 tmpdiv = last_block;
4321 last_group = do_div(tmpdiv, stripesize);
4322 tmpdiv = last_group;
4323 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4324 last_group = tmpdiv;
4325#else
4326 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4327 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
Scott Teel6b80b182014-02-18 13:56:55 -06004328#endif
Stephen M. Cameron000ff7c2014-03-13 17:12:50 -05004329 if (first_group != last_group)
Scott Teel6b80b182014-02-18 13:56:55 -06004330 return IO_ACCEL_INELIGIBLE;
4331
4332 /* Verify request is in a single row of RAID 5/6 */
4333#if BITS_PER_LONG == 32
4334 tmpdiv = first_block;
4335 (void) do_div(tmpdiv, stripesize);
4336 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4337 tmpdiv = last_block;
4338 (void) do_div(tmpdiv, stripesize);
4339 r5or6_last_row = r0_last_row = tmpdiv;
4340#else
4341 first_row = r5or6_first_row = r0_first_row =
4342 first_block / stripesize;
4343 r5or6_last_row = r0_last_row = last_block / stripesize;
4344#endif
4345 if (r5or6_first_row != r5or6_last_row)
4346 return IO_ACCEL_INELIGIBLE;
4347
4348
4349 /* Verify request is in a single column */
4350#if BITS_PER_LONG == 32
4351 tmpdiv = first_block;
4352 first_row_offset = do_div(tmpdiv, stripesize);
4353 tmpdiv = first_row_offset;
4354 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4355 r5or6_first_row_offset = first_row_offset;
4356 tmpdiv = last_block;
4357 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4358 tmpdiv = r5or6_last_row_offset;
4359 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4360 tmpdiv = r5or6_first_row_offset;
4361 (void) do_div(tmpdiv, map->strip_size);
4362 first_column = r5or6_first_column = tmpdiv;
4363 tmpdiv = r5or6_last_row_offset;
4364 (void) do_div(tmpdiv, map->strip_size);
4365 r5or6_last_column = tmpdiv;
4366#else
4367 first_row_offset = r5or6_first_row_offset =
4368 (u32)((first_block % stripesize) %
4369 r5or6_blocks_per_row);
4370
4371 r5or6_last_row_offset =
4372 (u32)((last_block % stripesize) %
4373 r5or6_blocks_per_row);
4374
4375 first_column = r5or6_first_column =
Don Brace2b08b3e2015-01-23 16:41:09 -06004376 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
Scott Teel6b80b182014-02-18 13:56:55 -06004377 r5or6_last_column =
Don Brace2b08b3e2015-01-23 16:41:09 -06004378 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
Scott Teel6b80b182014-02-18 13:56:55 -06004379#endif
4380 if (r5or6_first_column != r5or6_last_column)
4381 return IO_ACCEL_INELIGIBLE;
4382
4383 /* Request is eligible */
4384 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
Don Brace2b08b3e2015-01-23 16:41:09 -06004385 le16_to_cpu(map->row_cnt);
Scott Teel6b80b182014-02-18 13:56:55 -06004386
4387 map_index = (first_group *
Don Brace2b08b3e2015-01-23 16:41:09 -06004388 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
Scott Teel6b80b182014-02-18 13:56:55 -06004389 (map_row * total_disks_per_row) + first_column;
4390 break;
4391 default:
4392 return IO_ACCEL_INELIGIBLE;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004393 }
Scott Teel6b80b182014-02-18 13:56:55 -06004394
Stephen Cameron07543e02015-01-23 16:44:14 -06004395 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4396 return IO_ACCEL_INELIGIBLE;
4397
Don Brace03383732015-01-23 16:43:30 -06004398 c->phys_disk = dev->phys_disk[map_index];
4399
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004400 disk_handle = dd[map_index].ioaccel_handle;
Don Brace2b08b3e2015-01-23 16:41:09 -06004401 disk_block = le64_to_cpu(map->disk_starting_blk) +
4402 first_row * le16_to_cpu(map->strip_size) +
4403 (first_row_offset - first_column *
4404 le16_to_cpu(map->strip_size));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004405 disk_block_cnt = block_cnt;
4406
4407 /* handle differing logical/physical block sizes */
4408 if (map->phys_blk_shift) {
4409 disk_block <<= map->phys_blk_shift;
4410 disk_block_cnt <<= map->phys_blk_shift;
4411 }
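	/*
	 * Example (illustrative only, assuming phys_blk_shift is
	 * log2(volume block size / disk block size)): with 4K volume blocks
	 * over 512-byte disk blocks, phys_blk_shift = 3, so volume block 100
	 * becomes disk block 800 and the block count scales by 8.
	 */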
4412 BUG_ON(disk_block_cnt > 0xffff);
4413
4414 /* build the new CDB for the physical disk I/O */
4415 if (disk_block > 0xffffffff) {
4416 cdb[0] = is_write ? WRITE_16 : READ_16;
4417 cdb[1] = 0;
4418 cdb[2] = (u8) (disk_block >> 56);
4419 cdb[3] = (u8) (disk_block >> 48);
4420 cdb[4] = (u8) (disk_block >> 40);
4421 cdb[5] = (u8) (disk_block >> 32);
4422 cdb[6] = (u8) (disk_block >> 24);
4423 cdb[7] = (u8) (disk_block >> 16);
4424 cdb[8] = (u8) (disk_block >> 8);
4425 cdb[9] = (u8) (disk_block);
4426 cdb[10] = (u8) (disk_block_cnt >> 24);
4427 cdb[11] = (u8) (disk_block_cnt >> 16);
4428 cdb[12] = (u8) (disk_block_cnt >> 8);
4429 cdb[13] = (u8) (disk_block_cnt);
4430 cdb[14] = 0;
4431 cdb[15] = 0;
4432 cdb_len = 16;
4433 } else {
4434 cdb[0] = is_write ? WRITE_10 : READ_10;
4435 cdb[1] = 0;
4436 cdb[2] = (u8) (disk_block >> 24);
4437 cdb[3] = (u8) (disk_block >> 16);
4438 cdb[4] = (u8) (disk_block >> 8);
4439 cdb[5] = (u8) (disk_block);
4440 cdb[6] = 0;
4441 cdb[7] = (u8) (disk_block_cnt >> 8);
4442 cdb[8] = (u8) (disk_block_cnt);
4443 cdb[9] = 0;
4444 cdb_len = 10;
4445 }
4446 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
Don Brace03383732015-01-23 16:43:30 -06004447 dev->scsi3addr,
4448 dev->phys_disk[map_index]);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06004449}
4450
/*
 * Submit commands down the "normal" RAID stack path.
 * All callers of hpsa_ciss_submit must check lockup_detected
 * beforehand: optionally before, and in any case after, calling cmd_alloc.
 */
Stephen Cameron574f05d2015-01-23 16:43:20 -06004456static int hpsa_ciss_submit(struct ctlr_info *h,
4457 struct CommandList *c, struct scsi_cmnd *cmd,
4458 unsigned char scsi3addr[])
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004459{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004460 cmd->host_scribble = (unsigned char *) c;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004461 c->cmd_type = CMD_SCSI;
4462 c->scsi_cmd = cmd;
4463 c->Header.ReplyQueue = 0; /* unused in simple mode */
4464 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
Don Bracef2405db2015-01-23 16:43:09 -06004465 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004466
4467 /* Fill in the request block... */
4468
4469 c->Request.Timeout = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004470 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4471 c->Request.CDBLen = cmd->cmd_len;
4472 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004473 switch (cmd->sc_data_direction) {
4474 case DMA_TO_DEVICE:
Stephen M. Camerona505b862014-11-14 17:27:04 -06004475 c->Request.type_attr_dir =
4476 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004477 break;
4478 case DMA_FROM_DEVICE:
Stephen M. Camerona505b862014-11-14 17:27:04 -06004479 c->Request.type_attr_dir =
4480 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004481 break;
4482 case DMA_NONE:
Stephen M. Camerona505b862014-11-14 17:27:04 -06004483 c->Request.type_attr_dir =
4484 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004485 break;
4486 case DMA_BIDIRECTIONAL:
		/*
		 * This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero (see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command()).
		 */
4491
Stephen M. Camerona505b862014-11-14 17:27:04 -06004492 c->Request.type_attr_dir =
4493 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004494 /* This is technically wrong, and hpsa controllers should
4495 * reject it with CMD_INVALID, which is the most correct
4496 * response, but non-fibre backends appear to let it
4497 * slide by, and give the same results as if this field
4498 * were set correctly. Either way is acceptable for
4499 * our purposes here.
4500 */
4501
4502 break;
4503
4504 default:
4505 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4506 cmd->sc_data_direction);
4507 BUG();
4508 break;
4509 }
4510
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06004511 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
Webb Scales73153fe2015-04-23 09:35:04 -05004512 hpsa_cmd_resolve_and_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004513 return SCSI_MLQUEUE_HOST_BUSY;
4514 }
4515 enqueue_cmd_and_start_io(h, c);
4516 /* the cmd'll come back via intr handler in complete_scsi_command() */
4517 return 0;
4518}
4519
Stephen Cameron360c73b2015-04-23 09:32:32 -05004520static void hpsa_cmd_init(struct ctlr_info *h, int index,
4521 struct CommandList *c)
4522{
4523 dma_addr_t cmd_dma_handle, err_dma_handle;
4524
4525 /* Zero out all of commandlist except the last field, refcount */
4526 memset(c, 0, offsetof(struct CommandList, refcount));
4527 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
4528 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4529 c->err_info = h->errinfo_pool + index;
4530 memset(c->err_info, 0, sizeof(*c->err_info));
4531 err_dma_handle = h->errinfo_pool_dhandle
4532 + index * sizeof(*c->err_info);
4533 c->cmdindex = index;
4534 c->busaddr = (u32) cmd_dma_handle;
4535 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4536 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4537 c->h = h;
Webb Scalesa58e7e52015-04-23 09:34:16 -05004538 c->scsi_cmd = SCSI_CMD_IDLE;
Stephen Cameron360c73b2015-04-23 09:32:32 -05004539}
4540
4541static void hpsa_preinitialize_commands(struct ctlr_info *h)
4542{
4543 int i;
4544
4545 for (i = 0; i < h->nr_cmds; i++) {
4546 struct CommandList *c = h->cmd_pool + i;
4547
4548 hpsa_cmd_init(h, i, c);
4549 atomic_set(&c->refcount, 0);
4550 }
4551}
4552
4553static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
4554 struct CommandList *c)
4555{
4556 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4557
Webb Scales73153fe2015-04-23 09:35:04 -05004558 BUG_ON(c->cmdindex != index);
4559
Stephen Cameron360c73b2015-04-23 09:32:32 -05004560 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4561 memset(c->err_info, 0, sizeof(*c->err_info));
4562 c->busaddr = (u32) cmd_dma_handle;
4563}
4564
Webb Scales592a0ad2015-04-23 09:32:48 -05004565static int hpsa_ioaccel_submit(struct ctlr_info *h,
4566 struct CommandList *c, struct scsi_cmnd *cmd,
4567 unsigned char *scsi3addr)
4568{
4569 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4570 int rc = IO_ACCEL_INELIGIBLE;
4571
4572 cmd->host_scribble = (unsigned char *) c;
4573
4574 if (dev->offload_enabled) {
4575 hpsa_cmd_init(h, c->cmdindex, c);
4576 c->cmd_type = CMD_SCSI;
4577 c->scsi_cmd = cmd;
4578 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4579 if (rc < 0) /* scsi_dma_map failed. */
4580 rc = SCSI_MLQUEUE_HOST_BUSY;
Joe Handzika3144e02015-04-23 09:32:59 -05004581 } else if (dev->hba_ioaccel_enabled) {
Webb Scales592a0ad2015-04-23 09:32:48 -05004582 hpsa_cmd_init(h, c->cmdindex, c);
4583 c->cmd_type = CMD_SCSI;
4584 c->scsi_cmd = cmd;
4585 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4586 if (rc < 0) /* scsi_dma_map failed. */
4587 rc = SCSI_MLQUEUE_HOST_BUSY;
4588 }
4589 return rc;
4590}
4591
Don Brace080ef1c2015-01-23 16:43:25 -06004592static void hpsa_command_resubmit_worker(struct work_struct *work)
4593{
4594 struct scsi_cmnd *cmd;
4595 struct hpsa_scsi_dev_t *dev;
Webb Scales8a0ff922015-04-23 09:34:11 -05004596 struct CommandList *c = container_of(work, struct CommandList, work);
Don Brace080ef1c2015-01-23 16:43:25 -06004597
4598 cmd = c->scsi_cmd;
4599 dev = cmd->device->hostdata;
4600 if (!dev) {
4601 cmd->result = DID_NO_CONNECT << 16;
Webb Scales8a0ff922015-04-23 09:34:11 -05004602 return hpsa_cmd_free_and_done(c->h, c, cmd);
Don Brace080ef1c2015-01-23 16:43:25 -06004603 }
Webb Scalesa58e7e52015-04-23 09:34:16 -05004604 if (c->abort_pending)
4605 return hpsa_cmd_abort_and_free(c->h, c, cmd);
Webb Scales592a0ad2015-04-23 09:32:48 -05004606 if (c->cmd_type == CMD_IOACCEL2) {
4607 struct ctlr_info *h = c->h;
4608 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
4609 int rc;
4610
4611 if (c2->error_data.serv_response ==
4612 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
4613 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
4614 if (rc == 0)
4615 return;
4616 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4617 /*
4618 * If we get here, it means dma mapping failed.
4619 * Try again via scsi mid layer, which will
4620 * then get SCSI_MLQUEUE_HOST_BUSY.
4621 */
4622 cmd->result = DID_IMM_RETRY << 16;
Webb Scales8a0ff922015-04-23 09:34:11 -05004623 return hpsa_cmd_free_and_done(h, c, cmd);
Webb Scales592a0ad2015-04-23 09:32:48 -05004624 }
4625 /* else, fall thru and resubmit down CISS path */
4626 }
4627 }
Stephen Cameron360c73b2015-04-23 09:32:32 -05004628 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
Don Brace080ef1c2015-01-23 16:43:25 -06004629 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4630 /*
4631 * If we get here, it means dma mapping failed. Try
4632 * again via scsi mid layer, which will then get
4633 * SCSI_MLQUEUE_HOST_BUSY.
Webb Scales592a0ad2015-04-23 09:32:48 -05004634 *
4635 * hpsa_ciss_submit will have already freed c
4636 * if it encountered a dma mapping failure.
Don Brace080ef1c2015-01-23 16:43:25 -06004637 */
4638 cmd->result = DID_IMM_RETRY << 16;
4639 cmd->scsi_done(cmd);
4640 }
4641}
4642
/* Runs without holding struct Scsi_Host->host_lock (lockless mode) */
4644static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4645{
4646 struct ctlr_info *h;
4647 struct hpsa_scsi_dev_t *dev;
4648 unsigned char scsi3addr[8];
4649 struct CommandList *c;
4650 int rc = 0;
4651
4652 /* Get the ptr to our adapter structure out of cmd->host. */
4653 h = sdev_to_hba(cmd->device);
Webb Scales73153fe2015-04-23 09:35:04 -05004654
4655 BUG_ON(cmd->request->tag < 0);
4656
Stephen Cameron574f05d2015-01-23 16:43:20 -06004657 dev = cmd->device->hostdata;
4658 if (!dev) {
4659 cmd->result = DID_NO_CONNECT << 16;
4660 cmd->scsi_done(cmd);
4661 return 0;
4662 }
Webb Scales73153fe2015-04-23 09:35:04 -05004663
Stephen Cameron574f05d2015-01-23 16:43:20 -06004664 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4665
4666 if (unlikely(lockup_detected(h))) {
Webb Scales25163bd2015-04-23 09:32:00 -05004667 cmd->result = DID_NO_CONNECT << 16;
Stephen Cameron574f05d2015-01-23 16:43:20 -06004668 cmd->scsi_done(cmd);
4669 return 0;
4670 }
Webb Scales73153fe2015-04-23 09:35:04 -05004671 c = cmd_tagged_alloc(h, cmd);
Stephen Cameron574f05d2015-01-23 16:43:20 -06004672
Stephen Cameron407863c2015-01-23 16:44:19 -06004673 /*
4674 * Call alternate submit routine for I/O accelerated commands.
Stephen Cameron574f05d2015-01-23 16:43:20 -06004675 * Retries always go down the normal I/O path.
4676 */
4677 if (likely(cmd->retries == 0 &&
4678 cmd->request->cmd_type == REQ_TYPE_FS &&
4679 h->acciopath_status)) {
Webb Scales592a0ad2015-04-23 09:32:48 -05004680 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
4681 if (rc == 0)
4682 return 0;
4683 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
Webb Scales73153fe2015-04-23 09:35:04 -05004684 hpsa_cmd_resolve_and_free(h, c);
Webb Scales592a0ad2015-04-23 09:32:48 -05004685 return SCSI_MLQUEUE_HOST_BUSY;
Stephen Cameron574f05d2015-01-23 16:43:20 -06004686 }
4687 }
4688 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4689}
4690
Webb Scales8ebc9242015-01-23 16:44:50 -06004691static void hpsa_scan_complete(struct ctlr_info *h)
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004692{
4693 unsigned long flags;
4694
Webb Scales8ebc9242015-01-23 16:44:50 -06004695 spin_lock_irqsave(&h->scan_lock, flags);
4696 h->scan_finished = 1;
4697 wake_up_all(&h->scan_wait_queue);
4698 spin_unlock_irqrestore(&h->scan_lock, flags);
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004699}
4700
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004701static void hpsa_scan_start(struct Scsi_Host *sh)
4702{
4703 struct ctlr_info *h = shost_to_hba(sh);
4704 unsigned long flags;
4705
Webb Scales8ebc9242015-01-23 16:44:50 -06004706 /*
4707 * Don't let rescans be initiated on a controller known to be locked
4708 * up. If the controller locks up *during* a rescan, that thread is
4709 * probably hosed, but at least we can prevent new rescan threads from
4710 * piling up on a locked up controller.
4711 */
4712 if (unlikely(lockup_detected(h)))
4713 return hpsa_scan_complete(h);
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004714
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004715 /* wait until any scan already in progress is finished. */
4716 while (1) {
4717 spin_lock_irqsave(&h->scan_lock, flags);
4718 if (h->scan_finished)
4719 break;
4720 spin_unlock_irqrestore(&h->scan_lock, flags);
4721 wait_event(h->scan_wait_queue, h->scan_finished);
4722 /* Note: We don't need to worry about a race between this
4723 * thread and driver unload because the midlayer will
4724 * have incremented the reference count, so unload won't
4725 * happen if we're in here.
4726 */
4727 }
4728 h->scan_finished = 0; /* mark scan as in progress */
4729 spin_unlock_irqrestore(&h->scan_lock, flags);
4730
Webb Scales8ebc9242015-01-23 16:44:50 -06004731 if (unlikely(lockup_detected(h)))
4732 return hpsa_scan_complete(h);
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004733
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004734 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4735
Webb Scales8ebc9242015-01-23 16:44:50 -06004736 hpsa_scan_complete(h);
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004737}
4738
Don Brace7c0a0222015-01-23 16:41:30 -06004739static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4740{
Don Brace03383732015-01-23 16:43:30 -06004741 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4742
4743 if (!logical_drive)
4744 return -ENODEV;
Don Brace7c0a0222015-01-23 16:41:30 -06004745
4746 if (qdepth < 1)
4747 qdepth = 1;
Don Brace03383732015-01-23 16:43:30 -06004748 else if (qdepth > logical_drive->queue_depth)
4749 qdepth = logical_drive->queue_depth;
4750
4751 return scsi_change_queue_depth(sdev, qdepth);
Don Brace7c0a0222015-01-23 16:41:30 -06004752}
4753
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004754static int hpsa_scan_finished(struct Scsi_Host *sh,
4755 unsigned long elapsed_time)
4756{
4757 struct ctlr_info *h = shost_to_hba(sh);
4758 unsigned long flags;
4759 int finished;
4760
4761 spin_lock_irqsave(&h->scan_lock, flags);
4762 finished = h->scan_finished;
4763 spin_unlock_irqrestore(&h->scan_lock, flags);
4764 return finished;
4765}
4766
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004767static void hpsa_unregister_scsi(struct ctlr_info *h)
4768{
4769 /* we are being forcibly unloaded, and may not refuse. */
4770 scsi_remove_host(h->scsi_host);
4771 scsi_host_put(h->scsi_host);
4772 h->scsi_host = NULL;
4773}
4774
4775static int hpsa_register_scsi(struct ctlr_info *h)
4776{
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004777 struct Scsi_Host *sh;
4778 int error;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004779
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004780 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4781 if (sh == NULL)
4782 goto fail;
4783
4784 sh->io_port = 0;
4785 sh->n_io_port = 0;
4786 sh->this_id = -1;
4787 sh->max_channel = 3;
4788 sh->max_cmd_len = MAX_COMMAND_SIZE;
4789 sh->max_lun = HPSA_MAX_LUN;
4790 sh->max_id = HPSA_MAX_LUN;
Stephen Cameron41ce4c32015-04-23 09:31:47 -05004791 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
Don Brace03383732015-01-23 16:43:30 -06004792 sh->cmd_per_lun = sh->can_queue;
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004793 sh->sg_tablesize = h->maxsgentries;
4794 h->scsi_host = sh;
4795 sh->hostdata[0] = (unsigned long) h;
4796 sh->irq = h->intr[h->intr_mode];
4797 sh->unique_id = sh->irq;
Webb Scales73153fe2015-04-23 09:35:04 -05004798 error = scsi_init_shared_tag_map(sh, sh->can_queue);
4799 if (error) {
4800 dev_err(&h->pdev->dev,
4801 "%s: scsi_init_shared_tag_map failed for controller %d\n",
4802 __func__, h->ctlr);
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004803 goto fail_host_put;
Webb Scales73153fe2015-04-23 09:35:04 -05004804 }
4805 error = scsi_add_host(sh, &h->pdev->dev);
4806 if (error) {
4807 dev_err(&h->pdev->dev, "%s: scsi_add_host failed for controller %d\n",
4808 __func__, h->ctlr);
4809 goto fail_host_put;
4810 }
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004811 scsi_scan_host(sh);
4812 return 0;
4813
4814 fail_host_put:
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004815 scsi_host_put(sh);
4816 return error;
4817 fail:
	dev_err(&h->pdev->dev, "%s: scsi_host_alloc failed for controller %d\n",
		__func__, h->ctlr);
4820 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004821}
4822
Webb Scalesb69324f2015-04-23 09:34:22 -05004823/*
Webb Scales73153fe2015-04-23 09:35:04 -05004824 * The block layer has already gone to the trouble of picking out a unique,
4825 * small-integer tag for this request. We use an offset from that value as
4826 * an index to select our command block. (The offset allows us to reserve the
4827 * low-numbered entries for our own uses.)
4828 */
4829static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
4830{
4831 int idx = scmd->request->tag;
4832
4833 if (idx < 0)
4834 return idx;
4835
4836 /* Offset to leave space for internal cmds. */
	return idx + HPSA_NRESERVED_CMDS;
4838}
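
/*
 * Example (illustrative only): a request the block layer tagged 0 selects
 * command block HPSA_NRESERVED_CMDS, leaving indexes 0 through
 * HPSA_NRESERVED_CMDS - 1 for driver-internal commands.
 */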
4839
4840/*
Webb Scalesb69324f2015-04-23 09:34:22 -05004841 * Send a TEST_UNIT_READY command to the specified LUN using the specified
4842 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
4843 */
4844static int hpsa_send_test_unit_ready(struct ctlr_info *h,
4845 struct CommandList *c, unsigned char lunaddr[],
4846 int reply_queue)
4847{
4848 int rc;
4849
4850 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4851 (void) fill_cmd(c, TEST_UNIT_READY, h,
4852 NULL, 0, 0, lunaddr, TYPE_CMD);
4853 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
4854 if (rc)
4855 return rc;
4856 /* no unmap needed here because no data xfer. */
4857
4858 /* Check if the unit is already ready. */
4859 if (c->err_info->CommandStatus == CMD_SUCCESS)
4860 return 0;
4861
4862 /*
4863 * The first command sent after reset will receive "unit attention" to
4864 * indicate that the LUN has been reset...this is actually what we're
4865 * looking for (but, success is good too).
4866 */
4867 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4868 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4869 (c->err_info->SenseInfo[2] == NO_SENSE ||
4870 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4871 return 0;
4872
4873 return 1;
4874}
4875
4876/*
4877 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
4878 * returns zero when the unit is ready, and non-zero when giving up.
4879 */
4880static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
4881 struct CommandList *c,
4882 unsigned char lunaddr[], int reply_queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004883{
Tomas Henzl89193582014-02-21 16:25:05 -06004884 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004885 int count = 0;
4886 int waittime = 1; /* seconds */
Webb Scalesb69324f2015-04-23 09:34:22 -05004887
4888 /* Send test unit ready until device ready, or give up. */
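	/*
	 * The retry interval doubles on each pass (1s, 2s, 4s, ...) while it
	 * is below HPSA_MAX_WAIT_INTERVAL_SECS, for at most
	 * HPSA_TUR_RETRY_LIMIT attempts.
	 */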
4889 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
4890
4891 /*
4892 * Wait for a bit. do this first, because if we send
4893 * the TUR right away, the reset will just abort it.
4894 */
4895 msleep(1000 * waittime);
4896
4897 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
4898 if (!rc)
4899 break;
4900
4901 /* Increase wait time with each try, up to a point. */
4902 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4903 waittime *= 2;
4904
4905 dev_warn(&h->pdev->dev,
4906 "waiting %d secs for device to become ready.\n",
4907 waittime);
4908 }
4909
4910 return rc;
4911}
4912
4913static int wait_for_device_to_become_ready(struct ctlr_info *h,
4914 unsigned char lunaddr[],
4915 int reply_queue)
4916{
4917 int first_queue;
4918 int last_queue;
4919 int rq;
4920 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004921 struct CommandList *c;
4922
Stephen Cameron45fcb862015-01-23 16:43:04 -06004923 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004924
Webb Scalesb69324f2015-04-23 09:34:22 -05004925 /*
4926 * If no specific reply queue was requested, then send the TUR
4927 * repeatedly, requesting a reply on each reply queue; otherwise execute
4928 * the loop exactly once using only the specified queue.
4929 */
4930 if (reply_queue == DEFAULT_REPLY_QUEUE) {
4931 first_queue = 0;
4932 last_queue = h->nreply_queues - 1;
4933 } else {
4934 first_queue = reply_queue;
4935 last_queue = reply_queue;
4936 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004937
Webb Scalesb69324f2015-04-23 09:34:22 -05004938 for (rq = first_queue; rq <= last_queue; rq++) {
4939 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
Webb Scales25163bd2015-04-23 09:32:00 -05004940 if (rc)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004941 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004942 }
4943
4944 if (rc)
4945 dev_warn(&h->pdev->dev, "giving up on device.\n");
4946 else
4947 dev_warn(&h->pdev->dev, "device is ready.\n");
4948
Stephen Cameron45fcb862015-01-23 16:43:04 -06004949 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004950 return rc;
4951}
4952
4953/* Need at least one of these error handlers to keep ../scsi/hosts.c from
4954 * complaining. Doing a host- or bus-reset can't do anything good here.
4955 */
4956static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4957{
4958 int rc;
4959 struct ctlr_info *h;
4960 struct hpsa_scsi_dev_t *dev;
Webb Scales73153fe2015-04-23 09:35:04 -05004961 char msg[40];
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004962
4963 /* find the controller to which the command to be aborted was sent */
4964 h = sdev_to_hba(scsicmd->device);
4965 if (h == NULL) /* paranoia */
4966 return FAILED;
Don Bracee3458932015-01-23 16:44:24 -06004967
4968 if (lockup_detected(h))
4969 return FAILED;
4970
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004971 dev = scsicmd->device->hostdata;
4972 if (!dev) {
		dev_err(&h->pdev->dev,
			"hpsa_eh_device_reset_handler: device lookup failed.\n");
4975 return FAILED;
4976 }
Webb Scales25163bd2015-04-23 09:32:00 -05004977
4978 /* if controller locked up, we can guarantee command won't complete */
4979 if (lockup_detected(h)) {
Webb Scales73153fe2015-04-23 09:35:04 -05004980 sprintf(msg, "cmd %d RESET FAILED, lockup detected",
4981 hpsa_get_cmd_index(scsicmd));
4982 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
Webb Scales25163bd2015-04-23 09:32:00 -05004983 return FAILED;
4984 }
4985
4986 /* this reset request might be the result of a lockup; check */
4987 if (detect_controller_lockup(h)) {
Webb Scales73153fe2015-04-23 09:35:04 -05004988 sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
4989 hpsa_get_cmd_index(scsicmd));
4990 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
Webb Scales25163bd2015-04-23 09:32:00 -05004991 return FAILED;
4992 }
4993
4994 hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");
4995
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004996 /* send a reset to the SCSI LUN which the command was sent to */
Webb Scales25163bd2015-04-23 09:32:00 -05004997 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
4998 DEFAULT_REPLY_QUEUE);
Webb Scalesb69324f2015-04-23 09:34:22 -05004999 if (rc == 0)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005000 return SUCCESS;
5001
Webb Scales25163bd2015-04-23 09:32:00 -05005002 dev_warn(&h->pdev->dev,
5003 "scsi %d:%d:%d:%d reset failed\n",
5004 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005005 return FAILED;
5006}
5007
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005008static void swizzle_abort_tag(u8 *tag)
5009{
5010 u8 original_tag[8];
5011
5012 memcpy(original_tag, tag, 8);
5013 tag[0] = original_tag[3];
5014 tag[1] = original_tag[2];
5015 tag[2] = original_tag[1];
5016 tag[3] = original_tag[0];
5017 tag[4] = original_tag[7];
5018 tag[5] = original_tag[6];
5019 tag[6] = original_tag[5];
5020 tag[7] = original_tag[4];
5021}
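
/*
 * Example (illustrative only): tag bytes 00 01 02 03 04 05 06 07 become
 * 03 02 01 00 07 06 05 04, i.e. each 4-byte half is byte-reversed,
 * presumably to match the byte order the abort firmware expects.
 */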
5022
Scott Teel17eb87d2014-02-18 13:55:28 -06005023static void hpsa_get_tag(struct ctlr_info *h,
Don Brace2b08b3e2015-01-23 16:41:09 -06005024 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
Scott Teel17eb87d2014-02-18 13:55:28 -06005025{
Don Brace2b08b3e2015-01-23 16:41:09 -06005026 u64 tag;
Scott Teel17eb87d2014-02-18 13:55:28 -06005027 if (c->cmd_type == CMD_IOACCEL1) {
5028 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5029 &h->ioaccel_cmd_pool[c->cmdindex];
Don Brace2b08b3e2015-01-23 16:41:09 -06005030 tag = le64_to_cpu(cm1->tag);
5031 *tagupper = cpu_to_le32(tag >> 32);
5032 *taglower = cpu_to_le32(tag);
Scott Teel54b6e9e2014-02-18 13:56:45 -06005033 return;
Scott Teel17eb87d2014-02-18 13:55:28 -06005034 }
Scott Teel54b6e9e2014-02-18 13:56:45 -06005035 if (c->cmd_type == CMD_IOACCEL2) {
5036 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5037 &h->ioaccel2_cmd_pool[c->cmdindex];
Scott Teeldd0e19f2014-02-18 13:57:31 -06005038 /* upper tag not used in ioaccel2 mode */
5039 memset(tagupper, 0, sizeof(*tagupper));
5040 *taglower = cm2->Tag;
Scott Teel54b6e9e2014-02-18 13:56:45 -06005041 return;
5042 }
Don Brace2b08b3e2015-01-23 16:41:09 -06005043 tag = le64_to_cpu(c->Header.tag);
5044 *tagupper = cpu_to_le32(tag >> 32);
5045 *taglower = cpu_to_le32(tag);
Scott Teel17eb87d2014-02-18 13:55:28 -06005046}
5047
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005048static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005049 struct CommandList *abort, int reply_queue)
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005050{
5051 int rc = IO_OK;
5052 struct CommandList *c;
5053 struct ErrorInfo *ei;
Don Brace2b08b3e2015-01-23 16:41:09 -06005054 __le32 tagupper, taglower;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005055
Stephen Cameron45fcb862015-01-23 16:43:04 -06005056 c = cmd_alloc(h);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005057
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005058 /* fill_cmd can't fail here, no buffer to map */
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005059 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005060 0, 0, scsi3addr, TYPE_MSG);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005061 if (h->needs_abort_tags_swizzled)
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005062 swizzle_abort_tag(&c->Request.CDB[4]);
Webb Scales25163bd2015-04-23 09:32:00 -05005063 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
Scott Teel17eb87d2014-02-18 13:55:28 -06005064 hpsa_get_tag(h, abort, &taglower, &tagupper);
Webb Scales25163bd2015-04-23 09:32:00 -05005065 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
Scott Teel17eb87d2014-02-18 13:55:28 -06005066 __func__, tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005067 /* no unmap needed here because no data xfer. */
5068
5069 ei = c->err_info;
5070 switch (ei->CommandStatus) {
5071 case CMD_SUCCESS:
5072 break;
Stephen Cameron9437ac42015-04-23 09:32:16 -05005073 case CMD_TMF_STATUS:
5074 rc = hpsa_evaluate_tmf_status(h, c);
5075 break;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005076 case CMD_UNABORTABLE: /* Very common, don't make noise. */
5077 rc = -1;
5078 break;
5079 default:
5080 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
Scott Teel17eb87d2014-02-18 13:55:28 -06005081 __func__, tagupper, taglower);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06005082 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005083 rc = -1;
5084 break;
5085 }
Stephen Cameron45fcb862015-01-23 16:43:04 -06005086 cmd_free(h, c);
Scott Teeldd0e19f2014-02-18 13:57:31 -06005087 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5088 __func__, tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005089 return rc;
5090}
5091
Stephen Cameron8be986c2015-04-23 09:34:06 -05005092static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5093 struct CommandList *command_to_abort, int reply_queue)
5094{
5095 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5096 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5097 struct io_accel2_cmd *c2a =
5098 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
Webb Scalesa58e7e52015-04-23 09:34:16 -05005099 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
Stephen Cameron8be986c2015-04-23 09:34:06 -05005100 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5101
5102 /*
5103 * We're overlaying struct hpsa_tmf_struct on top of something which
5104 * was allocated as a struct io_accel2_cmd, so we better be sure it
5105 * actually fits, and doesn't overrun the error info space.
5106 */
5107 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5108 sizeof(struct io_accel2_cmd));
5109 BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5110 offsetof(struct hpsa_tmf_struct, error_len) +
5111 sizeof(ac->error_len));
5112
5113 c->cmd_type = IOACCEL2_TMF;
Webb Scalesa58e7e52015-04-23 09:34:16 -05005114 c->scsi_cmd = SCSI_CMD_BUSY;
5115
Stephen Cameron8be986c2015-04-23 09:34:06 -05005116 /* Adjust the DMA address to point to the accelerated command buffer */
5117 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5118 (c->cmdindex * sizeof(struct io_accel2_cmd));
5119 BUG_ON(c->busaddr & 0x0000007F);
5120
5121 memset(ac, 0, sizeof(*c2)); /* yes this is correct */
5122 ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5123 ac->reply_queue = reply_queue;
5124 ac->tmf = IOACCEL2_TMF_ABORT;
5125 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5126 memset(ac->lun_id, 0, sizeof(ac->lun_id));
5127 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5128 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5129 ac->error_ptr = cpu_to_le64(c->busaddr +
5130 offsetof(struct io_accel2_cmd, error_data));
5131 ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5132}
5133
/*
 * The ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target resets, and send them to the
 * address of the physical disk used for the ioaccel2 command.
 * Returns 0 on success (IO_OK), -1 on failure.
 */
5140
5141static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
Webb Scales25163bd2015-04-23 09:32:00 -05005142 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
Scott Teel54b6e9e2014-02-18 13:56:45 -06005143{
5144 int rc = IO_OK;
5145 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5146 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5147 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5148 unsigned char *psa = &phys_scsi3addr[0];
5149
5150 /* Get a pointer to the hpsa logical device. */
Stephen Cameron7fa30302015-01-23 16:44:30 -06005151 scmd = abort->scsi_cmd;
Scott Teel54b6e9e2014-02-18 13:56:45 -06005152 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5153 if (dev == NULL) {
5154 dev_warn(&h->pdev->dev,
5155 "Cannot abort: no device pointer for command.\n");
5156 return -1; /* not abortable */
5157 }
5158
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06005159 if (h->raid_offload_debug > 0)
5160 dev_info(&h->pdev->dev,
Webb Scales0d96ef52015-04-23 09:31:55 -05005161 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06005162 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
Webb Scales0d96ef52015-04-23 09:31:55 -05005163 "Reset as abort",
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06005164 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5165 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5166
Scott Teel54b6e9e2014-02-18 13:56:45 -06005167 if (!dev->offload_enabled) {
5168 dev_warn(&h->pdev->dev,
5169 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5170 return -1; /* not abortable */
5171 }
5172
5173 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
5174 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5175 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5176 return -1; /* not abortable */
5177 }
5178
5179 /* send the reset */
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06005180 if (h->raid_offload_debug > 0)
5181 dev_info(&h->pdev->dev,
5182 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5183 psa[0], psa[1], psa[2], psa[3],
5184 psa[4], psa[5], psa[6], psa[7]);
Webb Scales25163bd2015-04-23 09:32:00 -05005185 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
Scott Teel54b6e9e2014-02-18 13:56:45 -06005186 if (rc != 0) {
5187 dev_warn(&h->pdev->dev,
5188 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5189 psa[0], psa[1], psa[2], psa[3],
5190 psa[4], psa[5], psa[6], psa[7]);
5191 return rc; /* failed to reset */
5192 }
5193
5194 /* wait for device to recover */
Webb Scalesb69324f2015-04-23 09:34:22 -05005195 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
Scott Teel54b6e9e2014-02-18 13:56:45 -06005196 dev_warn(&h->pdev->dev,
5197 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5198 psa[0], psa[1], psa[2], psa[3],
5199 psa[4], psa[5], psa[6], psa[7]);
5200 return -1; /* failed to recover */
5201 }
5202
5203 /* device recovered */
5204 dev_info(&h->pdev->dev,
5205 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5206 psa[0], psa[1], psa[2], psa[3],
5207 psa[4], psa[5], psa[6], psa[7]);
5208
5209 return rc; /* success */
5210}
5211
Stephen Cameron8be986c2015-04-23 09:34:06 -05005212static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5213 struct CommandList *abort, int reply_queue)
5214{
5215 int rc = IO_OK;
5216 struct CommandList *c;
5217 __le32 taglower, tagupper;
5218 struct hpsa_scsi_dev_t *dev;
5219 struct io_accel2_cmd *c2;
5220
5221 dev = abort->scsi_cmd->device->hostdata;
5222 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5223 return -1;
5224
5225 c = cmd_alloc(h);
5226 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5227 c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5228 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5229 hpsa_get_tag(h, abort, &taglower, &tagupper);
5230 dev_dbg(&h->pdev->dev,
5231 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5232 __func__, tagupper, taglower);
5233 /* no unmap needed here because no data xfer. */
5234
5235 dev_dbg(&h->pdev->dev,
5236 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5237 __func__, tagupper, taglower, c2->error_data.serv_response);
5238 switch (c2->error_data.serv_response) {
5239 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5240 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5241 rc = 0;
5242 break;
5243 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5244 case IOACCEL2_SERV_RESPONSE_FAILURE:
5245 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5246 rc = -1;
5247 break;
5248 default:
5249 dev_warn(&h->pdev->dev,
5250 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5251 __func__, tagupper, taglower,
5252 c2->error_data.serv_response);
5253 rc = -1;
5254 }
5255 cmd_free(h, c);
5256 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5257 tagupper, taglower);
5258 return rc;
5259}
5260
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005261static int hpsa_send_abort_both_ways(struct ctlr_info *h,
Webb Scales25163bd2015-04-23 09:32:00 -05005262 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005263{
Stephen Cameron8be986c2015-04-23 09:34:06 -05005264 /*
5265	 * IO accelerator (ioaccel2) mode commands should be aborted via the
Scott Teel54b6e9e2014-02-18 13:56:45 -06005266	 * accelerated path, since the RAID path is unaware of these commands,
Stephen Cameron8be986c2015-04-23 09:34:06 -05005267 * but not all underlying firmware can handle abort TMF.
5268 * Change abort to physical device reset when abort TMF is unsupported.
Scott Teel54b6e9e2014-02-18 13:56:45 -06005269 */
Stephen Cameron8be986c2015-04-23 09:34:06 -05005270 if (abort->cmd_type == CMD_IOACCEL2) {
5271 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5272 return hpsa_send_abort_ioaccel2(h, abort,
5273 reply_queue);
5274 else
5275 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
Webb Scales25163bd2015-04-23 09:32:00 -05005276 abort, reply_queue);
Stephen Cameron8be986c2015-04-23 09:34:06 -05005277 }
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005278 return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
Webb Scales25163bd2015-04-23 09:32:00 -05005279}
5280
5281/* Find out which reply queue a command was meant to return on */
5282static int hpsa_extract_reply_queue(struct ctlr_info *h,
5283 struct CommandList *c)
5284{
5285 if (c->cmd_type == CMD_IOACCEL2)
5286 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5287 return c->Header.ReplyQueue;
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05005288}
5289
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005290/*
5291 * Limit concurrency of abort commands to prevent
5292 * over-subscription of commands
5293 */
5294static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5295{
5296#define ABORT_CMD_WAIT_MSECS 5000
5297 return !wait_event_timeout(h->abort_cmd_wait_queue,
5298 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5299 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5300}
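/*
 * Illustrative note: this is the acquire half of a counting-semaphore
 * idiom built from an atomic_t and a wait queue. The release half, used
 * in hpsa_eh_abort_handler() below, is:
 *
 *	atomic_inc(&h->abort_cmds_available);
 *	wake_up_all(&h->abort_cmd_wait_queue);
 *
 * atomic_dec_if_positive() claims a slot only when one is available, and
 * wait_event_timeout() re-tests the condition whenever a slot is
 * released, so no more aborts than the counter's initial value can be in
 * flight at once.
 */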
5301
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005302/* Send an abort for the specified command.
5303 * If the device and controller support it,
5304 * send a task abort request.
5305 */
5306static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5307{
5308
Webb Scalesa58e7e52015-04-23 09:34:16 -05005309 int rc;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005310 struct ctlr_info *h;
5311 struct hpsa_scsi_dev_t *dev;
5312 struct CommandList *abort; /* pointer to command to be aborted */
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005313 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
5314 char msg[256]; /* For debug messaging. */
5315 int ml = 0;
Don Brace2b08b3e2015-01-23 16:41:09 -06005316 __le32 tagupper, taglower;
Webb Scales25163bd2015-04-23 09:32:00 -05005317 int refcount, reply_queue;
5318
5319 if (sc == NULL)
5320 return FAILED;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005321
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005322 if (sc->device == NULL)
5323 return FAILED;
5324
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005325 /* Find the controller of the command to be aborted */
5326 h = sdev_to_hba(sc->device);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005327 if (h == NULL)
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005328 return FAILED;
5329
Webb Scales25163bd2015-04-23 09:32:00 -05005330 /* Find the device of the command to be aborted */
5331 dev = sc->device->hostdata;
5332 if (!dev) {
5333		dev_err(&h->pdev->dev,
5334			"ABORT FAILED, device lookup failed.\n");
Don Bracee3458932015-01-23 16:44:24 -06005335 return FAILED;
Webb Scales25163bd2015-04-23 09:32:00 -05005336 }
5337
5338 /* If controller locked up, we can guarantee command won't complete */
5339 if (lockup_detected(h)) {
5340 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5341 "ABORT FAILED, lockup detected");
5342 return FAILED;
5343 }
5344
5345 /* This is a good time to check if controller lockup has occurred */
5346 if (detect_controller_lockup(h)) {
5347 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5348 "ABORT FAILED, new lockup detected");
5349 return FAILED;
5350 }
Don Bracee3458932015-01-23 16:44:24 -06005351
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005352 /* Check that controller supports some kind of task abort */
5353 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5354 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5355 return FAILED;
5356
5357 memset(msg, 0, sizeof(msg));
Robert Elliott4b761552015-04-23 09:33:54 -05005358 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005359 h->scsi_host->host_no, sc->device->channel,
Webb Scales0d96ef52015-04-23 09:31:55 -05005360 sc->device->id, sc->device->lun,
Robert Elliott4b761552015-04-23 09:33:54 -05005361 "Aborting command", sc);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005362
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005363 /* Get SCSI command to be aborted */
5364 abort = (struct CommandList *) sc->host_scribble;
5365 if (abort == NULL) {
Webb Scales281a7fd2015-01-23 16:43:35 -06005366 /* This can happen if the command already completed. */
5367 return SUCCESS;
5368 }
5369 refcount = atomic_inc_return(&abort->refcount);
5370 if (refcount == 1) { /* Command is done already. */
5371 cmd_free(h, abort);
5372 return SUCCESS;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005373 }
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005374
5375 /* Don't bother trying the abort if we know it won't work. */
5376 if (abort->cmd_type != CMD_IOACCEL2 &&
5377 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5378 cmd_free(h, abort);
5379 return FAILED;
5380 }
5381
Webb Scalesa58e7e52015-04-23 09:34:16 -05005382 /*
5383 * Check that we're aborting the right command.
5384 * It's possible the CommandList already completed and got re-used.
5385 */
5386 if (abort->scsi_cmd != sc) {
5387 cmd_free(h, abort);
5388 return SUCCESS;
5389 }
5390
5391 abort->abort_pending = true;
Scott Teel17eb87d2014-02-18 13:55:28 -06005392 hpsa_get_tag(h, abort, &taglower, &tagupper);
Webb Scales25163bd2015-04-23 09:32:00 -05005393 reply_queue = hpsa_extract_reply_queue(h, abort);
Scott Teel17eb87d2014-02-18 13:55:28 -06005394 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
Stephen Cameron7fa30302015-01-23 16:44:30 -06005395 as = abort->scsi_cmd;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005396 if (as != NULL)
Robert Elliott4b761552015-04-23 09:33:54 -05005397 ml += sprintf(msg+ml,
5398 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
5399 as->cmd_len, as->cmnd[0], as->cmnd[1],
5400 as->serial_number);
5401 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
Webb Scales0d96ef52015-04-23 09:31:55 -05005402 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
Robert Elliott4b761552015-04-23 09:33:54 -05005403
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005404 /*
5405 * Command is in flight, or possibly already completed
5406 * by the firmware (but not to the scsi mid layer) but we can't
5407 * distinguish which. Send the abort down.
5408 */
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005409 if (wait_for_available_abort_cmd(h)) {
5410 dev_warn(&h->pdev->dev,
Robert Elliott4b761552015-04-23 09:33:54 -05005411 "%s FAILED, timeout waiting for an abort command to become available.\n",
5412 msg);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005413 cmd_free(h, abort);
5414 return FAILED;
5415 }
Webb Scales25163bd2015-04-23 09:32:00 -05005416 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05005417 atomic_inc(&h->abort_cmds_available);
5418 wake_up_all(&h->abort_cmd_wait_queue);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005419 if (rc != 0) {
Robert Elliott4b761552015-04-23 09:33:54 -05005420 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
Webb Scales0d96ef52015-04-23 09:31:55 -05005421 hpsa_show_dev_msg(KERN_WARNING, h, dev,
Robert Elliott4b761552015-04-23 09:33:54 -05005422 "FAILED to abort command");
Webb Scales281a7fd2015-01-23 16:43:35 -06005423 cmd_free(h, abort);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005424 return FAILED;
5425 }
Robert Elliott4b761552015-04-23 09:33:54 -05005426 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
Webb Scalesa58e7e52015-04-23 09:34:16 -05005427 wait_event(h->abort_sync_wait_queue,
5428 abort->scsi_cmd != sc || lockup_detected(h));
Webb Scales281a7fd2015-01-23 16:43:35 -06005429 cmd_free(h, abort);
Webb Scalesa58e7e52015-04-23 09:34:16 -05005430 return !lockup_detected(h) ? SUCCESS : FAILED;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005431}
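/*
 * Usage note (a sketch of the usual SCSI midlayer contract, not a new
 * definition): the midlayer calls this handler through the host
 * template's error-handling hooks, roughly:
 *
 *	static struct scsi_host_template hpsa_driver_template = {
 *		...
 *		.eh_abort_handler	= hpsa_eh_abort_handler,
 *		...
 *	};
 *
 * Returning SUCCESS tells the midlayer the command is no longer owned by
 * this driver; FAILED escalates to the next recovery step (device reset).
 */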
5432
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005433/*
Webb Scales73153fe2015-04-23 09:35:04 -05005434 * For operations with an associated SCSI command, a command block is allocated
5435 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
5436 * block request tag as an index into a table of entries. cmd_tagged_free() is
5437 * the complement, although cmd_free() may be called instead.
5438 */
5439static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
5440 struct scsi_cmnd *scmd)
5441{
5442 int idx = hpsa_get_cmd_index(scmd);
5443 struct CommandList *c = h->cmd_pool + idx;
5444
5445 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
5446 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5447 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5448 /* The index value comes from the block layer, so if it's out of
5449 * bounds, it's probably not our bug.
5450 */
5451 BUG();
5452 }
5453
5454 atomic_inc(&c->refcount);
5455 if (unlikely(!hpsa_is_cmd_idle(c))) {
5456 /*
5457 * We expect that the SCSI layer will hand us a unique tag
5458 * value. Thus, there should never be a collision here between
5459 * two requests...because if the selected command isn't idle
5460 * then someone is going to be very disappointed.
5461 */
5462 dev_err(&h->pdev->dev,
5463 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5464 idx);
5465 if (c->scsi_cmd != NULL)
5466 scsi_print_command(c->scsi_cmd);
5467 scsi_print_command(scmd);
5468 }
5469
5470 hpsa_cmd_partial_init(h, idx, c);
5471 return c;
5472}
5473
5474static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5475{
5476 /*
5477 * Release our reference to the block. We don't need to do anything
5478 * else to free it, because it is accessed by index. (There's no point
5479 * in checking the result of the decrement, since we cannot guarantee
5480 * that there isn't a concurrent abort which is also accessing it.)
5481 */
5482 (void)atomic_dec(&c->refcount);
5483}
5484
5485/*
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005486 * For operations that cannot sleep, a command block is allocated at init,
5487 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5488 * which ones are free or in use. Lock must be held when calling this.
5489 * cmd_free() is the complement.
Robert Elliottbf43caf2015-04-23 09:33:38 -05005490 * This function never gives up and returns NULL. If it hangs,
5491 * another thread must call cmd_free() to free some tags.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005492 */
Webb Scales281a7fd2015-01-23 16:43:35 -06005493
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005494static struct CommandList *cmd_alloc(struct ctlr_info *h)
5495{
5496 struct CommandList *c;
Stephen Cameron360c73b2015-04-23 09:32:32 -05005497 int refcount, i;
Webb Scales73153fe2015-04-23 09:35:04 -05005498 int offset = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005499
Robert Elliott33811022015-01-23 16:43:41 -06005500 /*
5501	 * There is some *extremely* small but non-zero chance that
Stephen M. Cameron4c413122014-11-14 17:27:29 -06005502 * multiple threads could get in here, and one thread could
5503 * be scanning through the list of bits looking for a free
5504 * one, but the free ones are always behind him, and other
5505 * threads sneak in behind him and eat them before he can
5506 * get to them, so that while there is always a free one, a
5507 * very unlucky thread might be starved anyway, never able to
5508 * beat the other threads. In reality, this happens so
5509 * infrequently as to be indistinguishable from never.
Webb Scales73153fe2015-04-23 09:35:04 -05005510 *
5511 * Note that we start allocating commands before the SCSI host structure
5512 * is initialized. Since the search starts at bit zero, this
5513 * all works, since we have at least one command structure available;
5514 * however, it means that the structures with the low indexes have to be
5515 * reserved for driver-initiated requests, while requests from the block
5516 * layer will use the higher indexes.
Stephen M. Cameron4c413122014-11-14 17:27:29 -06005517 */
5518
Webb Scales281a7fd2015-01-23 16:43:35 -06005519 for (;;) {
Webb Scales73153fe2015-04-23 09:35:04 -05005520 i = find_next_zero_bit(h->cmd_pool_bits,
5521 HPSA_NRESERVED_CMDS,
5522 offset);
5523 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
Webb Scales281a7fd2015-01-23 16:43:35 -06005524 offset = 0;
5525 continue;
5526 }
5527 c = h->cmd_pool + i;
5528 refcount = atomic_inc_return(&c->refcount);
5529 if (unlikely(refcount > 1)) {
5530 cmd_free(h, c); /* already in use */
Webb Scales73153fe2015-04-23 09:35:04 -05005531 offset = (i + 1) % HPSA_NRESERVED_CMDS;
Webb Scales281a7fd2015-01-23 16:43:35 -06005532 continue;
5533 }
5534 set_bit(i & (BITS_PER_LONG - 1),
5535 h->cmd_pool_bits + (i / BITS_PER_LONG));
5536 break; /* it's ours now. */
5537 }
Stephen Cameron360c73b2015-04-23 09:32:32 -05005538 hpsa_cmd_partial_init(h, i, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005539 return c;
5540}
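/*
 * Illustrative sketch of the alloc/free pairing used throughout this
 * file (error handling elided):
 *
 *	struct CommandList *c = cmd_alloc(h);	// never returns NULL
 *	...fill in and submit c...
 *	cmd_free(h, c);
 *
 * Note the refcount test above: atomic_inc_return() == 1 proves this
 * thread is the slot's sole owner; any larger value means an abort or a
 * tagged user still holds a reference, so the slot is skipped.
 */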
5541
Webb Scales73153fe2015-04-23 09:35:04 -05005542/*
5543 * This is the complementary operation to cmd_alloc(). Note, however, in some
5544 * corner cases it may also be used to free blocks allocated by
5545 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
5546 * the clear-bit is harmless.
5547 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005548static void cmd_free(struct ctlr_info *h, struct CommandList *c)
5549{
Webb Scales281a7fd2015-01-23 16:43:35 -06005550 if (atomic_dec_and_test(&c->refcount)) {
5551 int i;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005552
Webb Scales281a7fd2015-01-23 16:43:35 -06005553 i = c - h->cmd_pool;
5554 clear_bit(i & (BITS_PER_LONG - 1),
5555 h->cmd_pool_bits + (i / BITS_PER_LONG));
5556 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005557}
5558
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005559#ifdef CONFIG_COMPAT
5560
Don Brace42a91642014-11-14 17:26:27 -06005561static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
5562 void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005563{
5564 IOCTL32_Command_struct __user *arg32 =
5565 (IOCTL32_Command_struct __user *) arg;
5566 IOCTL_Command_struct arg64;
5567 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
5568 int err;
5569 u32 cp;
5570
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06005571 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005572 err = 0;
5573 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5574 sizeof(arg64.LUN_info));
5575 err |= copy_from_user(&arg64.Request, &arg32->Request,
5576 sizeof(arg64.Request));
5577 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5578 sizeof(arg64.error_info));
5579 err |= get_user(arg64.buf_size, &arg32->buf_size);
5580 err |= get_user(cp, &arg32->buf);
5581 arg64.buf = compat_ptr(cp);
5582 err |= copy_to_user(p, &arg64, sizeof(arg64));
5583
5584 if (err)
5585 return -EFAULT;
5586
Don Brace42a91642014-11-14 17:26:27 -06005587 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005588 if (err)
5589 return err;
5590 err |= copy_in_user(&arg32->error_info, &p->error_info,
5591 sizeof(arg32->error_info));
5592 if (err)
5593 return -EFAULT;
5594 return err;
5595}
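/*
 * Illustrative note: this is the standard compat-ioctl thunk pattern --
 * copy the 32-bit layout in, widen the user pointer with compat_ptr(),
 * rebuild the native struct in a compat_alloc_user_space() area, and
 * forward it to the native handler. The essential shape:
 *
 *	err |= get_user(cp, &arg32->buf);	// 32-bit user pointer
 *	arg64.buf = compat_ptr(cp);		// widened to native size
 *	err |= copy_to_user(p, &arg64, sizeof(arg64));
 *	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
 *
 * hpsa_ioctl32_big_passthru() below repeats the pattern for the
 * scatter/gather variant, adding only malloc_size.
 */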
5596
5597static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
Don Brace42a91642014-11-14 17:26:27 -06005598 int cmd, void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005599{
5600 BIG_IOCTL32_Command_struct __user *arg32 =
5601 (BIG_IOCTL32_Command_struct __user *) arg;
5602 BIG_IOCTL_Command_struct arg64;
5603 BIG_IOCTL_Command_struct __user *p =
5604 compat_alloc_user_space(sizeof(arg64));
5605 int err;
5606 u32 cp;
5607
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06005608 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005609 err = 0;
5610 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5611 sizeof(arg64.LUN_info));
5612 err |= copy_from_user(&arg64.Request, &arg32->Request,
5613 sizeof(arg64.Request));
5614 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5615 sizeof(arg64.error_info));
5616 err |= get_user(arg64.buf_size, &arg32->buf_size);
5617 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
5618 err |= get_user(cp, &arg32->buf);
5619 arg64.buf = compat_ptr(cp);
5620 err |= copy_to_user(p, &arg64, sizeof(arg64));
5621
5622 if (err)
5623 return -EFAULT;
5624
Don Brace42a91642014-11-14 17:26:27 -06005625 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005626 if (err)
5627 return err;
5628 err |= copy_in_user(&arg32->error_info, &p->error_info,
5629 sizeof(arg32->error_info));
5630 if (err)
5631 return -EFAULT;
5632 return err;
5633}
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06005634
Don Brace42a91642014-11-14 17:26:27 -06005635static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06005636{
5637 switch (cmd) {
5638 case CCISS_GETPCIINFO:
5639 case CCISS_GETINTINFO:
5640 case CCISS_SETINTINFO:
5641 case CCISS_GETNODENAME:
5642 case CCISS_SETNODENAME:
5643 case CCISS_GETHEARTBEAT:
5644 case CCISS_GETBUSTYPES:
5645 case CCISS_GETFIRMVER:
5646 case CCISS_GETDRIVVER:
5647 case CCISS_REVALIDVOLS:
5648 case CCISS_DEREGDISK:
5649 case CCISS_REGNEWDISK:
5650 case CCISS_REGNEWD:
5651 case CCISS_RESCANDISK:
5652 case CCISS_GETLUNINFO:
5653 return hpsa_ioctl(dev, cmd, arg);
5654
5655 case CCISS_PASSTHRU32:
5656 return hpsa_ioctl32_passthru(dev, cmd, arg);
5657 case CCISS_BIG_PASSTHRU32:
5658 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
5659
5660 default:
5661 return -ENOIOCTLCMD;
5662 }
5663}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005664#endif
5665
5666static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
5667{
5668 struct hpsa_pci_info pciinfo;
5669
5670 if (!argp)
5671 return -EINVAL;
5672 pciinfo.domain = pci_domain_nr(h->pdev->bus);
5673 pciinfo.bus = h->pdev->bus->number;
5674 pciinfo.dev_fn = h->pdev->devfn;
5675 pciinfo.board_id = h->board_id;
5676 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
5677 return -EFAULT;
5678 return 0;
5679}
5680
5681static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
5682{
5683 DriverVer_type DriverVer;
5684 unsigned char vmaj, vmin, vsubmin;
5685 int rc;
5686
5687 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
5688 &vmaj, &vmin, &vsubmin);
5689 if (rc != 3) {
5690		dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.\n",
5691			HPSA_DRIVER_VERSION);
5692 vmaj = 0;
5693 vmin = 0;
5694 vsubmin = 0;
5695 }
5696 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
5697 if (!argp)
5698 return -EINVAL;
5699 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
5700 return -EFAULT;
5701 return 0;
5702}
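/*
 * Worked example: a version string of "3.4.4" parses to vmaj = 3,
 * vmin = 4, vsubmin = 4, and packs to
 *
 *	(3 << 16) | (4 << 8) | 4 == 0x030404
 *
 * which is what userspace receives through CCISS_GETDRIVVER.
 */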
5703
5704static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5705{
5706 IOCTL_Command_struct iocommand;
5707 struct CommandList *c;
5708 char *buff = NULL;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005709 u64 temp64;
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005710 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005711
5712 if (!argp)
5713 return -EINVAL;
5714 if (!capable(CAP_SYS_RAWIO))
5715 return -EPERM;
5716 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
5717 return -EFAULT;
5718 if ((iocommand.buf_size < 1) &&
5719 (iocommand.Request.Type.Direction != XFER_NONE)) {
5720 return -EINVAL;
5721 }
5722 if (iocommand.buf_size > 0) {
5723 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
5724 if (buff == NULL)
Robert Elliott2dd02d72015-04-23 09:33:43 -05005725 return -ENOMEM;
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05005726 if (iocommand.Request.Type.Direction & XFER_WRITE) {
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005727 /* Copy the data into the buffer we created */
5728 if (copy_from_user(buff, iocommand.buf,
5729 iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005730 rc = -EFAULT;
5731 goto out_kfree;
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005732 }
5733 } else {
5734 memset(buff, 0, iocommand.buf_size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005735 }
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005736 }
Stephen Cameron45fcb862015-01-23 16:43:04 -06005737 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05005738
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005739 /* Fill in the command type */
5740 c->cmd_type = CMD_IOCTL_PEND;
Webb Scalesa58e7e52015-04-23 09:34:16 -05005741 c->scsi_cmd = SCSI_CMD_BUSY;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005742 /* Fill in Command Header */
5743 c->Header.ReplyQueue = 0; /* unused in simple mode */
5744 if (iocommand.buf_size > 0) { /* buffer to fill */
5745 c->Header.SGList = 1;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005746 c->Header.SGTotal = cpu_to_le16(1);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005747 } else { /* no buffers to fill */
5748 c->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005749 c->Header.SGTotal = cpu_to_le16(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005750 }
5751 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005752
5753 /* Fill in Request block */
5754 memcpy(&c->Request, &iocommand.Request,
5755 sizeof(c->Request));
5756
5757 /* Fill in the scatter gather information */
5758 if (iocommand.buf_size > 0) {
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005759 temp64 = pci_map_single(h->pdev, buff,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005760 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005761 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
5762 c->SG[0].Addr = cpu_to_le64(0);
5763 c->SG[0].Len = cpu_to_le32(0);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06005764 rc = -ENOMEM;
5765 goto out;
5766 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005767 c->SG[0].Addr = cpu_to_le64(temp64);
5768 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
5769 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005770 }
Webb Scales25163bd2015-04-23 09:32:00 -05005771 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
Stephen M. Cameronc2dd32e2011-06-03 09:57:29 -05005772 if (iocommand.buf_size > 0)
5773 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005774 check_ioctl_unit_attention(h, c);
Webb Scales25163bd2015-04-23 09:32:00 -05005775 if (rc) {
5776 rc = -EIO;
5777 goto out;
5778 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005779
5780 /* Copy the error information out */
5781 memcpy(&iocommand.error_info, c->err_info,
5782 sizeof(iocommand.error_info));
5783 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005784 rc = -EFAULT;
5785 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005786 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05005787 if ((iocommand.Request.Type.Direction & XFER_READ) &&
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005788 iocommand.buf_size > 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005789 /* Copy the data out of the buffer we created */
5790 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005791 rc = -EFAULT;
5792 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005793 }
5794 }
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005795out:
Stephen Cameron45fcb862015-01-23 16:43:04 -06005796 cmd_free(h, c);
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06005797out_kfree:
5798 kfree(buff);
5799 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005800}
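/*
 * Illustrative userspace sketch (hypothetical device node and CDB, not
 * part of the driver): CCISS_PASSTHRU takes an IOCTL_Command_struct from
 * <linux/cciss_ioctl.h> and requires CAP_SYS_RAWIO. A 96-byte INQUIRY
 * might look like:
 *
 *	IOCTL_Command_struct io = { 0 };
 *	unsigned char inq[96] = { 0 };
 *	int fd = open("/dev/sda", O_RDWR);	// node backed by this HBA
 *
 *	// io.LUN_info selects the target address; elided here
 *	io.Request.CDBLen = 6;
 *	io.Request.Type.Direction = XFER_READ;
 *	io.Request.CDB[0] = 0x12;		// INQUIRY
 *	io.Request.CDB[4] = sizeof(inq);
 *	io.buf_size = sizeof(inq);
 *	io.buf = inq;
 *	if (ioctl(fd, CCISS_PASSTHRU, &io) == 0)
 *		// inq[] now holds the inquiry data
 */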
5801
5802static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5803{
5804 BIG_IOCTL_Command_struct *ioc;
5805 struct CommandList *c;
5806 unsigned char **buff = NULL;
5807 int *buff_size = NULL;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005808 u64 temp64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005809 BYTE sg_used = 0;
5810 int status = 0;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06005811 u32 left;
5812 u32 sz;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005813 BYTE __user *data_ptr;
5814
5815 if (!argp)
5816 return -EINVAL;
5817 if (!capable(CAP_SYS_RAWIO))
5818 return -EPERM;
5819	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
5821 if (!ioc) {
5822 status = -ENOMEM;
5823 goto cleanup1;
5824 }
5825 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5826 status = -EFAULT;
5827 goto cleanup1;
5828 }
5829 if ((ioc->buf_size < 1) &&
5830 (ioc->Request.Type.Direction != XFER_NONE)) {
5831 status = -EINVAL;
5832 goto cleanup1;
5833 }
5834 /* Check kmalloc limits using all SGs */
5835 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5836 status = -EINVAL;
5837 goto cleanup1;
5838 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06005839 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005840 status = -EINVAL;
5841 goto cleanup1;
5842 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06005843 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005844 if (!buff) {
5845 status = -ENOMEM;
5846 goto cleanup1;
5847 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06005848 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005849 if (!buff_size) {
5850 status = -ENOMEM;
5851 goto cleanup1;
5852 }
5853 left = ioc->buf_size;
5854 data_ptr = ioc->buf;
5855 while (left) {
5856 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5857 buff_size[sg_used] = sz;
5858 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5859 if (buff[sg_used] == NULL) {
5860 status = -ENOMEM;
5861 goto cleanup1;
5862 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05005863 if (ioc->Request.Type.Direction & XFER_WRITE) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005864 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
Stephen M. Cameron0758f4f2014-07-03 10:18:03 -05005865 status = -EFAULT;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005866 goto cleanup1;
5867 }
5868 } else
5869 memset(buff[sg_used], 0, sz);
5870 left -= sz;
5871 data_ptr += sz;
5872 sg_used++;
5873 }
Stephen Cameron45fcb862015-01-23 16:43:04 -06005874 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05005875
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005876 c->cmd_type = CMD_IOCTL_PEND;
Webb Scalesa58e7e52015-04-23 09:34:16 -05005877 c->scsi_cmd = SCSI_CMD_BUSY;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005878 c->Header.ReplyQueue = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005879 c->Header.SGList = (u8) sg_used;
5880 c->Header.SGTotal = cpu_to_le16(sg_used);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005881 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005882 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5883 if (ioc->buf_size > 0) {
5884 int i;
5885 for (i = 0; i < sg_used; i++) {
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005886 temp64 = pci_map_single(h->pdev, buff[i],
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005887 buff_size[i], PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005888 if (dma_mapping_error(&h->pdev->dev,
5889 (dma_addr_t) temp64)) {
5890 c->SG[i].Addr = cpu_to_le64(0);
5891 c->SG[i].Len = cpu_to_le32(0);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06005892 hpsa_pci_unmap(h->pdev, c, i,
5893 PCI_DMA_BIDIRECTIONAL);
5894 status = -ENOMEM;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005895 goto cleanup0;
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06005896 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005897 c->SG[i].Addr = cpu_to_le64(temp64);
5898 c->SG[i].Len = cpu_to_le32(buff_size[i]);
5899 c->SG[i].Ext = cpu_to_le32(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005900 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005901 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005902 }
Webb Scales25163bd2015-04-23 09:32:00 -05005903 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005904 if (sg_used)
5905 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005906 check_ioctl_unit_attention(h, c);
Webb Scales25163bd2015-04-23 09:32:00 -05005907 if (status) {
5908 status = -EIO;
5909 goto cleanup0;
5910 }
5911
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005912 /* Copy the error information out */
5913 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5914 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005915 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005916 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005917 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05005918 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
Don Brace2b08b3e2015-01-23 16:41:09 -06005919 int i;
5920
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005921 /* Copy the data out of the buffer we created */
5922 BYTE __user *ptr = ioc->buf;
5923 for (i = 0; i < sg_used; i++) {
5924 if (copy_to_user(ptr, buff[i], buff_size[i])) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005925 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005926 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005927 }
5928 ptr += buff_size[i];
5929 }
5930 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005931 status = 0;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005932cleanup0:
Stephen Cameron45fcb862015-01-23 16:43:04 -06005933 cmd_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005934cleanup1:
5935 if (buff) {
Don Brace2b08b3e2015-01-23 16:41:09 -06005936 int i;
5937
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005938 for (i = 0; i < sg_used; i++)
5939 kfree(buff[i]);
5940 kfree(buff);
5941 }
5942 kfree(buff_size);
5943 kfree(ioc);
5944 return status;
5945}
5946
5947static void check_ioctl_unit_attention(struct ctlr_info *h,
5948 struct CommandList *c)
5949{
5950 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5951 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5952 (void) check_for_unit_attention(h, c);
5953}
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005954
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005955/*
5956 * ioctl
5957 */
Don Brace42a91642014-11-14 17:26:27 -06005958static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005959{
5960 struct ctlr_info *h;
5961 void __user *argp = (void __user *)arg;
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005962 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005963
5964 h = sdev_to_hba(dev);
5965
5966 switch (cmd) {
5967 case CCISS_DEREGDISK:
5968 case CCISS_REGNEWDISK:
5969 case CCISS_REGNEWD:
Stephen M. Camerona08a8472010-02-04 08:43:16 -06005970 hpsa_scan_start(h->scsi_host);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005971 return 0;
5972 case CCISS_GETPCIINFO:
5973 return hpsa_getpciinfo_ioctl(h, argp);
5974 case CCISS_GETDRIVVER:
5975 return hpsa_getdrivver_ioctl(h, argp);
5976 case CCISS_PASSTHRU:
Don Brace34f0c622015-01-23 16:43:46 -06005977 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005978 return -EAGAIN;
5979 rc = hpsa_passthru_ioctl(h, argp);
Don Brace34f0c622015-01-23 16:43:46 -06005980 atomic_inc(&h->passthru_cmds_avail);
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005981 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005982 case CCISS_BIG_PASSTHRU:
Don Brace34f0c622015-01-23 16:43:46 -06005983 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005984 return -EAGAIN;
5985 rc = hpsa_big_passthru_ioctl(h, argp);
Don Brace34f0c622015-01-23 16:43:46 -06005986 atomic_inc(&h->passthru_cmds_avail);
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005987 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005988 default:
5989 return -ENOTTY;
5990 }
5991}
5992
Robert Elliottbf43caf2015-04-23 09:33:38 -05005993static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005994 u8 reset_type)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005995{
5996 struct CommandList *c;
5997
5998 c = cmd_alloc(h);
Robert Elliottbf43caf2015-04-23 09:33:38 -05005999
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006000 /* fill_cmd can't fail here, no data buffer to map */
6001 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006002 RAID_CTLR_LUNID, TYPE_MSG);
6003 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6004 c->waiting = NULL;
6005 enqueue_cmd_and_start_io(h, c);
6006 /* Don't wait for completion, the reset won't complete. Don't free
6007 * the command either. This is the last command we will send before
6008 * re-initializing everything, so it doesn't matter and won't leak.
6009 */
Robert Elliottbf43caf2015-04-23 09:33:38 -05006010 return;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006011}
6012
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006013static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06006014 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006015 int cmd_type)
6016{
6017 int pci_dir = XFER_NONE;
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05006018 u64 tag; /* for commands to be aborted */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006019
6020 c->cmd_type = CMD_IOCTL_PEND;
Webb Scalesa58e7e52015-04-23 09:34:16 -05006021 c->scsi_cmd = SCSI_CMD_BUSY;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006022 c->Header.ReplyQueue = 0;
6023 if (buff != NULL && size > 0) {
6024 c->Header.SGList = 1;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006025 c->Header.SGTotal = cpu_to_le16(1);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006026 } else {
6027 c->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006028 c->Header.SGTotal = cpu_to_le16(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006029 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006030 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6031
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006032 if (cmd_type == TYPE_CMD) {
6033 switch (cmd) {
6034 case HPSA_INQUIRY:
6035 /* are we trying to read a vital product page */
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06006036 if (page_code & VPD_PAGE) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006037 c->Request.CDB[1] = 0x01;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06006038 c->Request.CDB[2] = (page_code & 0xff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006039 }
6040 c->Request.CDBLen = 6;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006041 c->Request.type_attr_dir =
6042 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006043 c->Request.Timeout = 0;
6044 c->Request.CDB[0] = HPSA_INQUIRY;
6045 c->Request.CDB[4] = size & 0xFF;
6046 break;
6047 case HPSA_REPORT_LOG:
6048 case HPSA_REPORT_PHYS:
6049		/* Talking to the controller, so it's a physical command:
6050		   mode = 00, target = 0. Nothing to write.
6051 */
6052 c->Request.CDBLen = 12;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006053 c->Request.type_attr_dir =
6054 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006055 c->Request.Timeout = 0;
6056 c->Request.CDB[0] = cmd;
6057 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6058 c->Request.CDB[7] = (size >> 16) & 0xFF;
6059 c->Request.CDB[8] = (size >> 8) & 0xFF;
6060 c->Request.CDB[9] = size & 0xFF;
6061 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006062 case HPSA_CACHE_FLUSH:
6063 c->Request.CDBLen = 12;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006064 c->Request.type_attr_dir =
6065 TYPE_ATTR_DIR(cmd_type,
6066 ATTR_SIMPLE, XFER_WRITE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006067 c->Request.Timeout = 0;
6068 c->Request.CDB[0] = BMIC_WRITE;
6069 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
Stephen M. Cameronbb158ea2011-10-26 16:21:17 -05006070 c->Request.CDB[7] = (size >> 8) & 0xFF;
6071 c->Request.CDB[8] = size & 0xFF;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006072 break;
6073 case TEST_UNIT_READY:
6074 c->Request.CDBLen = 6;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006075 c->Request.type_attr_dir =
6076 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006077 c->Request.Timeout = 0;
6078 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006079 case HPSA_GET_RAID_MAP:
6080 c->Request.CDBLen = 12;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006081 c->Request.type_attr_dir =
6082 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006083 c->Request.Timeout = 0;
6084 c->Request.CDB[0] = HPSA_CISS_READ;
6085 c->Request.CDB[1] = cmd;
6086 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6087 c->Request.CDB[7] = (size >> 16) & 0xFF;
6088 c->Request.CDB[8] = (size >> 8) & 0xFF;
6089 c->Request.CDB[9] = size & 0xFF;
6090 break;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06006091 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6092 c->Request.CDBLen = 10;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006093 c->Request.type_attr_dir =
6094 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
Stephen M. Cameron316b2212014-02-21 16:25:15 -06006095 c->Request.Timeout = 0;
6096 c->Request.CDB[0] = BMIC_READ;
6097 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6098 c->Request.CDB[7] = (size >> 16) & 0xFF;
6099 c->Request.CDB[8] = (size >> 8) & 0xFF;
6100 break;
Don Brace03383732015-01-23 16:43:30 -06006101 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6102 c->Request.CDBLen = 10;
6103 c->Request.type_attr_dir =
6104 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6105 c->Request.Timeout = 0;
6106 c->Request.CDB[0] = BMIC_READ;
6107 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6108 c->Request.CDB[7] = (size >> 16) & 0xFF;
6109		c->Request.CDB[8] = (size >> 8) & 0xFF;
6110 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006111 default:
6112		dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6113 BUG();
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006114 return -1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006115 }
6116 } else if (cmd_type == TYPE_MSG) {
6117 switch (cmd) {
6118
6119 case HPSA_DEVICE_RESET_MSG:
6120 c->Request.CDBLen = 16;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006121 c->Request.type_attr_dir =
6122 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006123 c->Request.Timeout = 0; /* Don't time out */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006124 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6125 c->Request.CDB[0] = cmd;
Stephen M. Cameron21e89af2012-07-26 11:34:10 -05006126 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006127 /* If bytes 4-7 are zero, it means reset the */
6128 /* LunID device */
6129 c->Request.CDB[4] = 0x00;
6130 c->Request.CDB[5] = 0x00;
6131 c->Request.CDB[6] = 0x00;
6132 c->Request.CDB[7] = 0x00;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006133 break;
6134 case HPSA_ABORT_MSG:
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05006135 memcpy(&tag, buff, sizeof(tag));
Don Brace2b08b3e2015-01-23 16:41:09 -06006136 dev_dbg(&h->pdev->dev,
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05006137			"Abort Tag:0x%016llx using rqst Tag:0x%016llx\n",
6138 tag, c->Header.tag);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006139 c->Request.CDBLen = 16;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006140 c->Request.type_attr_dir =
6141 TYPE_ATTR_DIR(cmd_type,
6142 ATTR_SIMPLE, XFER_WRITE);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006143 c->Request.Timeout = 0; /* Don't time out */
6144 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6145 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6146 c->Request.CDB[2] = 0x00; /* reserved */
6147 c->Request.CDB[3] = 0x00; /* reserved */
6148 /* Tag to abort goes in CDB[4]-CDB[11] */
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05006149 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006150 c->Request.CDB[12] = 0x00; /* reserved */
6151 c->Request.CDB[13] = 0x00; /* reserved */
6152 c->Request.CDB[14] = 0x00; /* reserved */
6153 c->Request.CDB[15] = 0x00; /* reserved */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006154 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006155 default:
6156 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6157 cmd);
6158 BUG();
6159 }
6160 } else {
6161 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6162 BUG();
6163 }
6164
Stephen M. Camerona505b862014-11-14 17:27:04 -06006165 switch (GET_DIR(c->Request.type_attr_dir)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006166 case XFER_READ:
6167 pci_dir = PCI_DMA_FROMDEVICE;
6168 break;
6169 case XFER_WRITE:
6170 pci_dir = PCI_DMA_TODEVICE;
6171 break;
6172 case XFER_NONE:
6173 pci_dir = PCI_DMA_NONE;
6174 break;
6175 default:
6176 pci_dir = PCI_DMA_BIDIRECTIONAL;
6177 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006178 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6179 return -1;
6180 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006181}
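/*
 * Illustrative sketch: fill_cmd() also builds message-type requests;
 * hpsa_send_host_reset() above uses it as (error path elided):
 *
 *	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
 *			RAID_CTLR_LUNID, TYPE_MSG);
 *
 * For TYPE_CMD requests with a buffer, hpsa_map_one() maps the DMA at
 * the end; a return of -1 means the mapping failed and the command must
 * not be submitted.
 */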
6182
6183/*
6184 * Map (physical) PCI mem into (virtual) kernel space
6185 */
6186static void __iomem *remap_pci_mem(ulong base, ulong size)
6187{
6188 ulong page_base = ((ulong) base) & PAGE_MASK;
6189 ulong page_offs = ((ulong) base) - page_base;
Stephen M. Cameron088ba342012-07-26 11:34:23 -05006190 void __iomem *page_remapped = ioremap_nocache(page_base,
6191 page_offs + size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006192
6193 return page_remapped ? (page_remapped + page_offs) : NULL;
6194}
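/*
 * Worked example: with 4 KiB pages, base = 0xfebf1234 yields
 * page_base = 0xfebf1000 and page_offs = 0x234. ioremap_nocache() maps
 * page_offs + size bytes from the page boundary, and the caller gets
 * page_remapped + 0x234 -- a pointer to the exact register, backed by a
 * properly page-aligned mapping.
 */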
6195
Matt Gates254f7962012-05-01 11:43:06 -05006196static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006197{
Matt Gates254f7962012-05-01 11:43:06 -05006198 return h->access.command_completed(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006199}
6200
Stephen M. Cameron900c5442010-02-04 08:42:35 -06006201static inline bool interrupt_pending(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006202{
6203 return h->access.intr_pending(h);
6204}
6205
6206static inline long interrupt_not_for_us(struct ctlr_info *h)
6207{
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006208 return (h->access.intr_pending(h) == 0) ||
6209 (h->interrupts_enabled == 0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006210}
6211
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06006212static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6213 u32 raw_tag)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006214{
6215 if (unlikely(tag_index >= h->nr_cmds)) {
6216 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6217 return 1;
6218 }
6219 return 0;
6220}
6221
Stephen M. Cameron5a3d16f2012-05-01 11:42:46 -05006222static inline void finish_cmd(struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006223{
Stephen M. Camerone85c5972012-05-01 11:43:42 -05006224 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
Scott Teelc3497752014-02-18 13:56:34 -06006225 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6226 || c->cmd_type == CMD_IOACCEL2))
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05006227 complete_scsi_command(c);
Stephen Cameron8be986c2015-04-23 09:34:06 -05006228 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006229 complete(c->waiting);
Stephen M. Camerona104c992010-02-04 08:42:24 -06006230}
6231
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006232
6233static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
Stephen M. Camerona104c992010-02-04 08:42:24 -06006234{
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006235#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
6236#define HPSA_SIMPLE_ERROR_BITS 0x03
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06006237 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006238 return tag & ~HPSA_SIMPLE_ERROR_BITS;
6239 return tag & ~HPSA_PERF_ERROR_BITS;
Stephen M. Camerona104c992010-02-04 08:42:24 -06006240}
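/*
 * Illustrative note: the controller reports status in the low bits of a
 * returned tag -- two bits in simple mode, DIRECT_LOOKUP_SHIFT bits in
 * performant mode -- so, e.g., a simple-mode raw tag of 0x37 masks down
 * to 0x34.
 */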
6241
Don Brace303932f2010-02-04 08:42:40 -06006242/* process completion of an indexed ("direct lookup") command */
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05006243static inline void process_indexed_cmd(struct ctlr_info *h,
Don Brace303932f2010-02-04 08:42:40 -06006244 u32 raw_tag)
6245{
6246 u32 tag_index;
6247 struct CommandList *c;
6248
Don Bracef2405db2015-01-23 16:43:09 -06006249 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05006250 if (!bad_tag(h, tag_index, raw_tag)) {
6251 c = h->cmd_pool + tag_index;
6252 finish_cmd(c);
6253 }
Don Brace303932f2010-02-04 08:42:40 -06006254}
6255
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006256/* Some controllers, like p400, will give us one interrupt
6257 * after a soft reset, even if we turned interrupts off.
6258 * Only need to check for this in the hpsa_xxx_discard_completions
6259 * functions.
6260 */
6261static int ignore_bogus_interrupt(struct ctlr_info *h)
6262{
6263 if (likely(!reset_devices))
6264 return 0;
6265
6266 if (likely(h->interrupts_enabled))
6267 return 0;
6268
6269 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6270		"(known firmware bug). Ignoring.\n");
6271
6272 return 1;
6273}
6274
Matt Gates254f7962012-05-01 11:43:06 -05006275/*
6276 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6277	 * Relies on (h->q[x] == x) being true for x such that
6278 * 0 <= x < MAX_REPLY_QUEUES.
6279 */
6280static struct ctlr_info *queue_to_hba(u8 *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006281{
Matt Gates254f7962012-05-01 11:43:06 -05006282 return container_of((queue - *queue), struct ctlr_info, q[0]);
6283}
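/*
 * Worked example: h->q[] is a u8 array set up so that h->q[x] == x, and
 * each interrupt handler is registered with &h->q[x] as its cookie. If
 * queue == &h->q[3], then *queue == 3, queue - *queue == &h->q[0], and
 * container_of() walks back from q[0] to the enclosing ctlr_info.
 */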
6284
6285static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6286{
6287 struct ctlr_info *h = queue_to_hba(queue);
6288 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006289 u32 raw_tag;
6290
6291 if (ignore_bogus_interrupt(h))
6292 return IRQ_NONE;
6293
6294 if (interrupt_not_for_us(h))
6295 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006296 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006297 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05006298 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006299 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05006300 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006301 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006302 return IRQ_HANDLED;
6303}
6304
Matt Gates254f7962012-05-01 11:43:06 -05006305static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006306{
Matt Gates254f7962012-05-01 11:43:06 -05006307 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006308 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05006309 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006310
6311 if (ignore_bogus_interrupt(h))
6312 return IRQ_NONE;
6313
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006314 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05006315 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006316 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05006317 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006318 return IRQ_HANDLED;
6319}
6320
Matt Gates254f7962012-05-01 11:43:06 -05006321static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006322{
Matt Gates254f7962012-05-01 11:43:06 -05006323 struct ctlr_info *h = queue_to_hba((u8 *) queue);
Don Brace303932f2010-02-04 08:42:40 -06006324 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05006325 u8 q = *(u8 *) queue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006326
6327 if (interrupt_not_for_us(h))
6328 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006329 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006330 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05006331 raw_tag = get_next_completion(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006332 while (raw_tag != FIFO_EMPTY) {
Don Bracef2405db2015-01-23 16:43:09 -06006333 process_indexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05006334 raw_tag = next_command(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006335 }
6336 }
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006337 return IRQ_HANDLED;
6338}
6339
Matt Gates254f7962012-05-01 11:43:06 -05006340static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006341{
Matt Gates254f7962012-05-01 11:43:06 -05006342 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006343 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05006344 u8 q = *(u8 *) queue;
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006345
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006346 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05006347 raw_tag = get_next_completion(h, q);
Don Brace303932f2010-02-04 08:42:40 -06006348 while (raw_tag != FIFO_EMPTY) {
Don Bracef2405db2015-01-23 16:43:09 -06006349 process_indexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05006350 raw_tag = next_command(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006351 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006352 return IRQ_HANDLED;
6353}
6354
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006355/* Send a message CDB to the firmware. Careful, this only works
6356 * in simple mode, not performant mode due to the tag lookup.
6357 * We only ever use this immediately after a controller reset.
6358 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006359static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6360 unsigned char type)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006361{
6362 struct Command {
6363 struct CommandListHeader CommandHeader;
6364 struct RequestBlock Request;
6365 struct ErrDescriptor ErrorDescriptor;
6366 };
6367 struct Command *cmd;
6368	static const size_t cmd_sz = sizeof(*cmd) +
6369		sizeof(struct ErrorInfo);
6370 dma_addr_t paddr64;
Don Brace2b08b3e2015-01-23 16:41:09 -06006371 __le32 paddr32;
6372 u32 tag;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006373 void __iomem *vaddr;
6374 int i, err;
6375
6376 vaddr = pci_ioremap_bar(pdev, 0);
6377 if (vaddr == NULL)
6378 return -ENOMEM;
6379
6380 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6381 * CCISS commands, so they must be allocated from the lower 4GiB of
6382 * memory.
6383 */
6384 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6385 if (err) {
6386 iounmap(vaddr);
Robert Elliott1eaec8f2015-01-23 16:42:37 -06006387 return err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006388 }
6389
6390 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6391 if (cmd == NULL) {
6392 iounmap(vaddr);
6393 return -ENOMEM;
6394 }
6395
6396 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6397 * although there's no guarantee, we assume that the address is at
6398 * least 4-byte aligned (most likely, it's page-aligned).
6399 */
Don Brace2b08b3e2015-01-23 16:41:09 -06006400 paddr32 = cpu_to_le32(paddr64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006401
6402 cmd->CommandHeader.ReplyQueue = 0;
6403 cmd->CommandHeader.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006404 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
Don Brace2b08b3e2015-01-23 16:41:09 -06006405 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006406 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6407
6408 cmd->Request.CDBLen = 16;
Stephen M. Camerona505b862014-11-14 17:27:04 -06006409 cmd->Request.type_attr_dir =
6410 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006411 cmd->Request.Timeout = 0; /* Don't time out */
6412 cmd->Request.CDB[0] = opcode;
6413 cmd->Request.CDB[1] = type;
6414 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006415 cmd->ErrorDescriptor.Addr =
Don Brace2b08b3e2015-01-23 16:41:09 -06006416 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06006417 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006418
Don Brace2b08b3e2015-01-23 16:41:09 -06006419 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006420
6421 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6422 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
Don Brace2b08b3e2015-01-23 16:41:09 -06006423 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006424 break;
6425 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6426 }
6427
6428 iounmap(vaddr);
6429
6430 /* we leak the DMA buffer here ... no choice since the controller could
6431 * still complete the command.
6432 */
6433 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6434 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6435 opcode, type);
6436 return -ETIMEDOUT;
6437 }
6438
6439 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6440
6441 if (tag & HPSA_ERROR_BIT) {
6442 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6443 opcode, type);
6444 return -EIO;
6445 }
6446
6447 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6448 opcode, type);
6449 return 0;
6450}
6451
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006452#define hpsa_noop(p) hpsa_message(p, 3, 0)
6453
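/*
 * Illustrative sketch, not part of the driver: how the no-op message above
 * is typically used to probe a freshly reset controller, condensing the
 * retry loop in hpsa_init_reset_devices() further down.  The helper name
 * hpsa_wait_for_noop_ack() is hypothetical.
 */
static int __maybe_unused hpsa_wait_for_noop_ack(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			return 0;	/* firmware answered the message CDB */
		dev_warn(&pdev->dev, "no-op failed; re-trying\n");
	}
	return -ETIMEDOUT;
}
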
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006454static int hpsa_controller_hard_reset(struct pci_dev *pdev,
Don Brace42a91642014-11-14 17:26:27 -06006455 void __iomem *vaddr, u32 use_doorbell)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006456{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006457
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006458 if (use_doorbell) {
6459 /* For everything after the P600, the PCI power state method
6460 * of resetting the controller doesn't work, so we have this
6461 * other way using the doorbell register.
6462 */
6463 dev_info(&pdev->dev, "using doorbell to reset controller\n");
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006464 writel(use_doorbell, vaddr + SA5_DOORBELL);
Stephen M. Cameron85009232013-09-23 13:33:36 -05006465
Justin Lindley00701a92014-05-29 10:52:47 -05006466 /* PMC hardware guys tell us we need a 10 second delay after
Stephen M. Cameron85009232013-09-23 13:33:36 -05006467 * doorbell reset and before any attempt to talk to the board
6468 * at all to ensure that this actually works and doesn't fall
6469 * over in some weird corner cases.
6470 */
Justin Lindley00701a92014-05-29 10:52:47 -05006471 msleep(10000);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006472 } else { /* Try to do it the PCI power state way */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006473
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006474 /* Quoting from the Open CISS Specification: "The Power
6475 * Management Control/Status Register (CSR) controls the power
6476 * state of the device. The normal operating state is D0,
6477 * CSR=00h. The software off state is D3, CSR=03h. To reset
6478 * the controller, place the interface device in D3 then to D0,
6479 * this causes a secondary PCI reset which will reset the
6480 * controller." */
6481
Don Brace2662cab2015-01-23 16:41:25 -06006482 int rc = 0;
6483
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006484 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
Don Brace2662cab2015-01-23 16:41:25 -06006485
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006486 /* enter the D3hot power management state */
Don Brace2662cab2015-01-23 16:41:25 -06006487 rc = pci_set_power_state(pdev, PCI_D3hot);
6488 if (rc)
6489 return rc;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006490
6491 msleep(500);
6492
6493 /* enter the D0 power management state */
Don Brace2662cab2015-01-23 16:41:25 -06006494 rc = pci_set_power_state(pdev, PCI_D0);
6495 if (rc)
6496 return rc;
Mike Millerc4853ef2011-10-21 08:19:43 +02006497
6498 /*
6499 * The P600 requires a small delay when changing states.
6500 * Otherwise we may think the board did not reset and we bail.
6501	 * This is for kdump only and is particular to the P600.
6502 */
6503 msleep(500);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006504 }
6505 return 0;
6506}
6507
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006508static void init_driver_version(char *driver_version, int len)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006509{
6510 memset(driver_version, 0, len);
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06006511 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006512}
6513
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006514static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006515{
6516 char *driver_version;
6517 int i, size = sizeof(cfgtable->driver_version);
6518
6519 driver_version = kmalloc(size, GFP_KERNEL);
6520 if (!driver_version)
6521 return -ENOMEM;
6522
6523 init_driver_version(driver_version, size);
6524 for (i = 0; i < size; i++)
6525 writeb(driver_version[i], &cfgtable->driver_version[i]);
6526 kfree(driver_version);
6527 return 0;
6528}
6529
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006530static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6531 unsigned char *driver_ver)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006532{
6533 int i;
6534
6535 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6536 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6537}
6538
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006539static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006540{
6541
6542 char *driver_ver, *old_driver_ver;
6543 int rc, size = sizeof(cfgtable->driver_version);
6544
6545 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6546 if (!old_driver_ver)
6547 return -ENOMEM;
6548 driver_ver = old_driver_ver + size;
6549
6550 /* After a reset, the 32 bytes of "driver version" in the cfgtable
6551 * should have been changed; otherwise we know the reset failed.
6552 */
6553 init_driver_version(old_driver_ver, size);
6554 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6555 rc = !memcmp(driver_ver, old_driver_ver, size);
6556 kfree(old_driver_ver);
6557 return rc;
6558}
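
/*
 * Illustrative sketch, not part of the driver: the reset-detection
 * handshake assembled from the helpers above.  Stamp the config table with
 * our driver version, reset, then check whether the stamp survived; a
 * surviving stamp means the firmware never cleared the table, i.e. the
 * reset did not take.  hpsa_stamped_reset() is a hypothetical name.
 */
static int __maybe_unused hpsa_stamped_reset(struct pci_dev *pdev,
	void __iomem *vaddr, struct CfgTable __iomem *cfgtable,
	u32 use_doorbell)
{
	int rc;

	rc = write_driver_ver_to_cfgtable(cfgtable);	/* stamp */
	if (rc)
		return rc;
	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		return rc;
	rc = controller_reset_failed(cfgtable);	/* did the stamp survive? */
	if (rc < 0)
		return rc;	/* -ENOMEM from the check itself */
	return rc ? -ENOTSUPP : 0;	/* caller may try soft reset */
}
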
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006559/* This does a hard reset of the controller using PCI power management
6560 * states or the doorbell register.
6561 */
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02006562static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006563{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006564 u64 cfg_offset;
6565 u32 cfg_base_addr;
6566 u64 cfg_base_addr_index;
6567 void __iomem *vaddr;
6568 unsigned long paddr;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006569 u32 misc_fw_support;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006570 int rc;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006571 struct CfgTable __iomem *cfgtable;
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006572 u32 use_doorbell;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006573 u16 command_register;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006574
6575 /* For controllers as old as the P600, this is very nearly
6576 * the same thing as
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006577 *
6578 * pci_save_state(pci_dev);
6579 * pci_set_power_state(pci_dev, PCI_D3hot);
6580 * pci_set_power_state(pci_dev, PCI_D0);
6581 * pci_restore_state(pci_dev);
6582 *
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006583 * For controllers newer than the P600, the pci power state
6584 * method of resetting doesn't work so we have another way
6585 * using the doorbell register.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006586 */
Stephen M. Cameron18867652010-06-16 13:51:45 -05006587
Robert Elliott60f923b2015-01-23 16:42:06 -06006588 if (!ctlr_is_resettable(board_id)) {
6589 dev_warn(&pdev->dev, "Controller not resettable\n");
Stephen M. Cameron25c1e56a2011-01-06 14:48:18 -06006590 return -ENODEV;
6591 }
Stephen M. Cameron46380782011-05-03 15:00:01 -05006592
6593 /* if controller is soft- but not hard resettable... */
6594 if (!ctlr_is_hard_resettable(board_id))
6595 return -ENOTSUPP; /* try soft reset later. */
Stephen M. Cameron18867652010-06-16 13:51:45 -05006596
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006597 /* Save the PCI command register */
6598 pci_read_config_word(pdev, 4, &command_register);
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006599 pci_save_state(pdev);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006600
6601 /* find the first memory BAR, so we can find the cfg table */
6602 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6603 if (rc)
6604 return rc;
6605 vaddr = remap_pci_mem(paddr, 0x250);
6606 if (!vaddr)
6607 return -ENOMEM;
6608
6609 /* find cfgtable in order to check if reset via doorbell is supported */
6610 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6611 &cfg_base_addr_index, &cfg_offset);
6612 if (rc)
6613 goto unmap_vaddr;
6614 cfgtable = remap_pci_mem(pci_resource_start(pdev,
6615 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6616 if (!cfgtable) {
6617 rc = -ENOMEM;
6618 goto unmap_vaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006619 }
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006620 rc = write_driver_ver_to_cfgtable(cfgtable);
6621 if (rc)
Tomas Henzl03741d92015-01-23 16:41:14 -06006622 goto unmap_cfgtable;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006623
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006624 /* If reset via doorbell register is supported, use that.
6625 * There are two such methods. Favor the newest method.
6626 */
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006627 misc_fw_support = readl(&cfgtable->misc_fw_support);
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006628 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6629 if (use_doorbell) {
6630 use_doorbell = DOORBELL_CTLR_RESET2;
6631 } else {
6632 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6633 if (use_doorbell) {
Stephen Cameron050f7142015-01-23 16:42:22 -06006634 dev_warn(&pdev->dev,
6635 "Soft reset not supported. Firmware update is required.\n");
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006636 rc = -ENOTSUPP; /* try soft reset */
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05006637 goto unmap_cfgtable;
6638 }
6639 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006640
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006641 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6642 if (rc)
6643 goto unmap_cfgtable;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006644
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006645 pci_restore_state(pdev);
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06006646 pci_write_config_word(pdev, 4, command_register);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006647
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006648 /* Some devices (notably the HP Smart Array 5i Controller)
6649 need a little pause here */
6650 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6651
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006652 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6653 if (rc) {
6654 dev_warn(&pdev->dev,
Stephen Cameron050f7142015-01-23 16:42:22 -06006655 "Failed waiting for board to become ready after hard reset\n");
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006656 goto unmap_cfgtable;
6657 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006658
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006659	rc = controller_reset_failed(cfgtable);
6660 if (rc < 0)
6661 goto unmap_cfgtable;
6662 if (rc) {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006663 dev_warn(&pdev->dev, "Unable to successfully reset "
6664 "controller. Will try soft reset.\n");
6665 rc = -ENOTSUPP;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006666 } else {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006667 dev_info(&pdev->dev, "board ready after hard reset.\n");
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006668 }
6669
6670unmap_cfgtable:
6671 iounmap(cfgtable);
6672
6673unmap_vaddr:
6674 iounmap(vaddr);
6675 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006676}
6677
6678/*
6679 * We cannot read the structure directly; for portability we must use
6680 * the io functions.
6681 * This is for debug only.
6682 */
Don Brace42a91642014-11-14 17:26:27 -06006683static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006684{
Stephen M. Cameron58f86652010-05-27 15:13:58 -05006685#ifdef HPSA_DEBUG
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006686 int i;
6687 char temp_name[17];
6688
6689 dev_info(dev, "Controller Configuration information\n");
6690 dev_info(dev, "------------------------------------\n");
6691 for (i = 0; i < 4; i++)
6692 temp_name[i] = readb(&(tb->Signature[i]));
6693 temp_name[4] = '\0';
6694 dev_info(dev, " Signature = %s\n", temp_name);
6695 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6696 dev_info(dev, " Transport methods supported = 0x%x\n",
6697 readl(&(tb->TransportSupport)));
6698 dev_info(dev, " Transport methods active = 0x%x\n",
6699 readl(&(tb->TransportActive)));
6700 dev_info(dev, " Requested transport Method = 0x%x\n",
6701 readl(&(tb->HostWrite.TransportRequest)));
6702 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6703 readl(&(tb->HostWrite.CoalIntDelay)));
6704 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6705 readl(&(tb->HostWrite.CoalIntCount)));
Robert Elliott69d6e332015-01-23 16:41:56 -06006706 dev_info(dev, " Max outstanding commands = %d\n",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006707 readl(&(tb->CmdsOutMax)));
6708 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6709 for (i = 0; i < 16; i++)
6710 temp_name[i] = readb(&(tb->ServerName[i]));
6711 temp_name[16] = '\0';
6712 dev_info(dev, " Server Name = %s\n", temp_name);
6713 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6714 readl(&(tb->HeartBeat)));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006715#endif /* HPSA_DEBUG */
Stephen M. Cameron58f86652010-05-27 15:13:58 -05006716}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006717
6718static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6719{
6720 int i, offset, mem_type, bar_type;
6721
6722 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6723 return 0;
6724 offset = 0;
6725 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6726 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6727 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6728 offset += 4;
6729 else {
6730 mem_type = pci_resource_flags(pdev, i) &
6731 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6732 switch (mem_type) {
6733 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6734 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6735 offset += 4; /* 32 bit */
6736 break;
6737 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6738 offset += 8;
6739 break;
6740 default: /* reserved in PCI 2.2 */
6741 dev_warn(&pdev->dev,
6742 "base address is invalid\n");
6743 return -1;
6744 break;
6745 }
6746 }
6747 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6748 return i + 1;
6749 }
6750 return -1;
6751}
6752
Robert Elliottcc64c812015-04-23 09:33:12 -05006753static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
6754{
6755 if (h->msix_vector) {
6756 if (h->pdev->msix_enabled)
6757 pci_disable_msix(h->pdev);
Robert Elliott105a3db2015-04-23 09:33:48 -05006758 h->msix_vector = 0;
Robert Elliottcc64c812015-04-23 09:33:12 -05006759 } else if (h->msi_vector) {
6760 if (h->pdev->msi_enabled)
6761 pci_disable_msi(h->pdev);
Robert Elliott105a3db2015-04-23 09:33:48 -05006762 h->msi_vector = 0;
Robert Elliottcc64c812015-04-23 09:33:12 -05006763 }
6764}
6765
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006766/* If MSI/MSI-X is supported by the kernel we will try to enable it on
Stephen Cameron050f7142015-01-23 16:42:22 -06006767 * controllers that are capable. If not, we use legacy INTx mode.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006768 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006769static void hpsa_interrupt_mode(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006770{
6771#ifdef CONFIG_PCI_MSI
Matt Gates254f7962012-05-01 11:43:06 -05006772 int err, i;
6773 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6774
6775 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6776 hpsa_msix_entries[i].vector = 0;
6777 hpsa_msix_entries[i].entry = i;
6778 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006779
6780 /* Some boards advertise MSI but don't really support it */
Stephen M. Cameron6b3f4c52010-05-27 15:13:02 -05006781 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6782 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006783 goto default_int_mode;
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006784 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
Stephen Cameron050f7142015-01-23 16:42:22 -06006785 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006786 h->msix_vector = MAX_REPLY_QUEUES;
Stephen M. Cameronf89439b2014-05-29 10:53:02 -05006787 if (h->msix_vector > num_online_cpus())
6788 h->msix_vector = num_online_cpus();
Alexander Gordeev18fce3c2014-08-18 08:01:42 +02006789 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6790 1, h->msix_vector);
6791 if (err < 0) {
6792 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6793 h->msix_vector = 0;
6794 goto single_msi_mode;
6795 } else if (err < h->msix_vector) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006796 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006797 "available\n", err);
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006798 }
Alexander Gordeev18fce3c2014-08-18 08:01:42 +02006799 h->msix_vector = err;
6800 for (i = 0; i < h->msix_vector; i++)
6801 h->intr[i] = hpsa_msix_entries[i].vector;
6802 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006803 }
Alexander Gordeev18fce3c2014-08-18 08:01:42 +02006804single_msi_mode:
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006805 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
Stephen Cameron050f7142015-01-23 16:42:22 -06006806 dev_info(&h->pdev->dev, "MSI capable controller\n");
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006807 if (!pci_enable_msi(h->pdev))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006808 h->msi_vector = 1;
6809 else
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006810 dev_warn(&h->pdev->dev, "MSI init failed\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006811 }
6812default_int_mode:
6813#endif /* CONFIG_PCI_MSI */
6814 /* if we get here we're going to use the default interrupt mode */
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006815 h->intr[h->intr_mode] = h->pdev->irq;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006816}
6817
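/*
 * Illustrative helper, hypothetical and not part of the driver: report
 * which mode hpsa_interrupt_mode() above settled on.  It records its
 * choice in h->msix_vector (number of MSI-X vectors granted) and
 * h->msi_vector, and falls back to legacy INTx when both stay zero.
 */
static const char * __maybe_unused hpsa_irq_mode_name(struct ctlr_info *h)
{
	if (h->msix_vector)
		return "MSI-X";	/* multiple reply queues possible */
	if (h->msi_vector)
		return "MSI";	/* single vector, single reply queue */
	return "INTx";		/* legacy shared interrupt line */
}
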
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006818static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006819{
6820 int i;
6821 u32 subsystem_vendor_id, subsystem_device_id;
6822
6823 subsystem_vendor_id = pdev->subsystem_vendor;
6824 subsystem_device_id = pdev->subsystem_device;
6825 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6826 subsystem_vendor_id;
6827
6828 for (i = 0; i < ARRAY_SIZE(products); i++)
6829 if (*board_id == products[i].board_id)
6830 return i;
6831
Stephen M. Cameron6798cc02010-06-16 13:51:20 -05006832 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6833 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6834 !hpsa_allow_any) {
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006835 dev_warn(&pdev->dev, "unrecognized board ID: "
6836 "0x%08x, ignoring.\n", *board_id);
6837 return -ENODEV;
6838 }
6839 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6840}
6841
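/*
 * Worked example (illustration only): the P600 has PCI subsystem vendor
 * 0x103C and subsystem device 0x3225, so the board_id composed above is
 * (0x3225 << 16) | 0x103C = 0x3225103C -- the same constant tested by
 * hpsa_p600_dma_prefetch_quirk() below.
 */
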
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006842static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6843 unsigned long *memory_bar)
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006844{
6845 int i;
6846
6847 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006848 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006849 /* addressing mode bits already removed */
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006850 *memory_bar = pci_resource_start(pdev, i);
6851 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006852 *memory_bar);
6853 return 0;
6854 }
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006855 dev_warn(&pdev->dev, "no memory BAR found\n");
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006856 return -ENODEV;
6857}
6858
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006859static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6860 int wait_for_ready)
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006861{
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006862 int i, iterations;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006863 u32 scratchpad;
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006864 if (wait_for_ready)
6865 iterations = HPSA_BOARD_READY_ITERATIONS;
6866 else
6867 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006868
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006869 for (i = 0; i < iterations; i++) {
6870 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6871 if (wait_for_ready) {
6872 if (scratchpad == HPSA_FIRMWARE_READY)
6873 return 0;
6874 } else {
6875 if (scratchpad != HPSA_FIRMWARE_READY)
6876 return 0;
6877 }
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006878 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6879 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006880 dev_warn(&pdev->dev, "board not ready, timed out.\n");
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006881 return -ENODEV;
6882}
6883
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006884static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6885 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6886 u64 *cfg_offset)
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006887{
6888 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6889 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6890 *cfg_base_addr &= (u32) 0x0000ffff;
6891 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6892 if (*cfg_base_addr_index == -1) {
6893 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6894 return -ENODEV;
6895 }
6896 return 0;
6897}
6898
Robert Elliott195f2c62015-04-23 09:33:17 -05006899static void hpsa_free_cfgtables(struct ctlr_info *h)
6900{
Robert Elliott105a3db2015-04-23 09:33:48 -05006901 if (h->transtable) {
Robert Elliott195f2c62015-04-23 09:33:17 -05006902 iounmap(h->transtable);
Robert Elliott105a3db2015-04-23 09:33:48 -05006903 h->transtable = NULL;
6904 }
6905 if (h->cfgtable) {
Robert Elliott195f2c62015-04-23 09:33:17 -05006906 iounmap(h->cfgtable);
Robert Elliott105a3db2015-04-23 09:33:48 -05006907 h->cfgtable = NULL;
6908 }
Robert Elliott195f2c62015-04-23 09:33:17 -05006909}
6910
6911/* Find and map CISS config table and transfer table
6912 * several items must be unmapped (freed) later
6913 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006914static int hpsa_find_cfgtables(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006915{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06006916 u64 cfg_offset;
6917 u32 cfg_base_addr;
6918 u64 cfg_base_addr_index;
Don Brace303932f2010-02-04 08:42:40 -06006919 u32 trans_offset;
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006920 int rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006921
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006922 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6923 &cfg_base_addr_index, &cfg_offset);
6924 if (rc)
6925 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006926 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006927 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
Robert Elliottcd3c81c2015-01-23 16:42:27 -06006928 if (!h->cfgtable) {
6929 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006930 return -ENOMEM;
Robert Elliottcd3c81c2015-01-23 16:42:27 -06006931 }
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006932 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6933 if (rc)
6934 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006935 /* Find performant mode table. */
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006936 trans_offset = readl(&h->cfgtable->TransMethodOffset);
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006937 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6938 cfg_base_addr_index)+cfg_offset+trans_offset,
6939 sizeof(*h->transtable));
Robert Elliott195f2c62015-04-23 09:33:17 -05006940 if (!h->transtable) {
6941 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
6942 hpsa_free_cfgtables(h);
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006943 return -ENOMEM;
Robert Elliott195f2c62015-04-23 09:33:17 -05006944 }
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006945 return 0;
6946}
6947
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006948static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006949{
Stephen Cameron41ce4c32015-04-23 09:31:47 -05006950#define MIN_MAX_COMMANDS 16
6951 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
6952
6953 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
Stephen M. Cameron72ceeae2011-01-06 14:48:13 -06006954
6955 /* Limit commands in memory limited kdump scenario. */
6956 if (reset_devices && h->max_commands > 32)
6957 h->max_commands = 32;
6958
Stephen Cameron41ce4c32015-04-23 09:31:47 -05006959 if (h->max_commands < MIN_MAX_COMMANDS) {
6960 dev_warn(&h->pdev->dev,
6961		"Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
6962 h->max_commands,
6963 MIN_MAX_COMMANDS);
6964 h->max_commands = MIN_MAX_COMMANDS;
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006965 }
6966}
6967
Webb Scalesc7ee65b2015-01-23 16:42:17 -06006968/* If the controller reports that the total max sg entries is greater than 512,
6969 * then we know that chained SG blocks work. (Original smart arrays did not
6970 * support chained SG blocks and would return zero for max sg entries.)
6971 */
6972static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6973{
6974 return h->maxsgentries > 512;
6975}
6976
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006977/* Interrogate the hardware for some limits:
6978 * max commands, max SG elements without chaining, and with chaining,
6979 * SG chain block size, etc.
6980 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006981static void hpsa_find_board_params(struct ctlr_info *h)
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006982{
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006983 hpsa_get_max_perf_mode_cmds(h);
Stephen Cameron45fcb862015-01-23 16:43:04 -06006984 h->nr_cmds = h->max_commands;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006985 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006986 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
Webb Scalesc7ee65b2015-01-23 16:42:17 -06006987 if (hpsa_supports_chained_sg_blocks(h)) {
6988	/* Limit in-command s/g elements to 32 to save dma'able memory. */
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006989 h->max_cmd_sg_entries = 32;
Webb Scales1a63ea62014-11-14 17:26:43 -06006990 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006991 h->maxsgentries--; /* save one for chain pointer */
6992 } else {
Webb Scalesc7ee65b2015-01-23 16:42:17 -06006993 /*
6994 * Original smart arrays supported at most 31 s/g entries
6995 * embedded inline in the command (trying to use more
6996 * would lock up the controller)
6997 */
6998 h->max_cmd_sg_entries = 31;
Webb Scales1a63ea62014-11-14 17:26:43 -06006999 h->maxsgentries = 31; /* default to traditional values */
Webb Scalesc7ee65b2015-01-23 16:42:17 -06007000 h->chainsize = 0;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007001 }
Stephen M. Cameron75167d22012-05-01 11:42:51 -05007002
7003 /* Find out what task management functions are supported and cache */
7004 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
Scott Teel0e7a7fc2014-02-18 13:55:59 -06007005 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7006 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7007 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7008 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
Stephen Cameron8be986c2015-04-23 09:34:06 -05007009 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7010 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007011}
7012
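/*
 * Worked example (illustration only) of the scatter-gather budget above:
 * a controller reporting MaxScatterGatherElements = 1024 supports chaining
 * (1024 > 512), so the driver keeps max_cmd_sg_entries = 32 entries inline
 * in the command, sizes chain blocks at chainsize = 1024 - 32 = 992
 * entries, then decrements maxsgentries to 1023 to reserve one inline slot
 * for the chain pointer.
 */
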
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05007013static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7014{
Akinobu Mita0fc9fd42012-04-04 22:14:59 +09007015 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
Stephen Cameron050f7142015-01-23 16:42:22 -06007016 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05007017 return false;
7018 }
7019 return true;
7020}
7021
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007022static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007023{
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007024 u32 driver_support;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007025
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007026 driver_support = readl(&(h->cfgtable->driver_support));
Arnd Bergmann0b9e7b72014-06-26 15:44:52 +02007027 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7028#ifdef CONFIG_X86
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007029 driver_support |= ENABLE_SCSI_PREFETCH;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007030#endif
Stephen M. Cameron28e13442013-12-04 17:10:21 -06007031 driver_support |= ENABLE_UNIT_ATTN;
7032 writel(driver_support, &(h->cfgtable->driver_support));
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05007033}
7034
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05007035/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7036 * in a prefetch beyond physical memory.
7037 */
7038static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7039{
7040 u32 dma_prefetch;
7041
7042 if (h->board_id != 0x3225103C)
7043 return;
7044 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7045 dma_prefetch |= 0x8000;
7046 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7047}
7048
Robert Elliottc706a792015-01-23 16:45:01 -06007049static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007050{
7051 int i;
7052 u32 doorbell_value;
7053 unsigned long flags;
7054 /* wait until the clear_event_notify bit 6 is cleared by controller. */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007055 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007056 spin_lock_irqsave(&h->lock, flags);
7057 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7058 spin_unlock_irqrestore(&h->lock, flags);
7059 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
Robert Elliottc706a792015-01-23 16:45:01 -06007060 goto done;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007061 /* delay and try again */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007062 msleep(CLEAR_EVENT_WAIT_INTERVAL);
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007063 }
Robert Elliottc706a792015-01-23 16:45:01 -06007064 return -ENODEV;
7065done:
7066 return 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007067}
7068
Robert Elliottc706a792015-01-23 16:45:01 -06007069static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007070{
7071 int i;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007072 u32 doorbell_value;
7073 unsigned long flags;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007074
7075	/* under certain very rare conditions, this can take a while.
7076 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7077 * as we enter this code.)
7078 */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007079 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
Webb Scales25163bd2015-04-23 09:32:00 -05007080 if (h->remove_in_progress)
7081 goto done;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06007082 spin_lock_irqsave(&h->lock, flags);
7083 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7084 spin_unlock_irqrestore(&h->lock, flags);
Dan Carpenter382be662011-02-15 15:33:13 -06007085 if (!(doorbell_value & CFGTBL_ChangeReq))
Robert Elliottc706a792015-01-23 16:45:01 -06007086 goto done;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007087 /* delay and try again */
Robert Elliott007e7aa2015-01-23 16:44:56 -06007088 msleep(MODE_CHANGE_WAIT_INTERVAL);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007089 }
Robert Elliottc706a792015-01-23 16:45:01 -06007090 return -ENODEV;
7091done:
7092 return 0;
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007093}
7094
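/*
 * Illustrative sketch, not part of the driver: both ack-wait helpers above
 * are instances of one pattern -- sample a doorbell bit under h->lock until
 * the controller clears it, sleeping between samples.  The generic form and
 * the name hpsa_wait_doorbell_clear() are hypothetical.
 */
static int __maybe_unused hpsa_wait_doorbell_clear(struct ctlr_info *h,
	u32 bit, int max_iterations, int interval_msecs)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	for (i = 0; i < max_iterations; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & bit))
			return 0;	/* controller acknowledged */
		msleep(interval_msecs);
	}
	return -ENODEV;
}
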
Robert Elliottc706a792015-01-23 16:45:01 -06007095/* return -ENODEV or other reason on error, 0 on success */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007096static int hpsa_enter_simple_mode(struct ctlr_info *h)
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007097{
7098 u32 trans_support;
7099
7100 trans_support = readl(&(h->cfgtable->TransportSupport));
7101 if (!(trans_support & SIMPLE_MODE))
7102 return -ENOTSUPP;
7103
7104 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007105
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007106 /* Update the field, and then ring the doorbell */
7107 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007108 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007109 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
Robert Elliottc706a792015-01-23 16:45:01 -06007110 if (hpsa_wait_for_mode_change_ack(h))
7111 goto error;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007112 print_cfg_table(&h->pdev->dev, h->cfgtable);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007113 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7114 goto error;
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06007115 h->transMethod = CFGTBL_Trans_Simple;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007116 return 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007117error:
Stephen Cameron050f7142015-01-23 16:42:22 -06007118 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007119 return -ENODEV;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007120}
7121
Robert Elliott195f2c62015-04-23 09:33:17 -05007122/* free items allocated or mapped by hpsa_pci_init */
7123static void hpsa_free_pci_init(struct ctlr_info *h)
7124{
7125 hpsa_free_cfgtables(h); /* pci_init 4 */
7126 iounmap(h->vaddr); /* pci_init 3 */
Robert Elliott105a3db2015-04-23 09:33:48 -05007127 h->vaddr = NULL;
Robert Elliott195f2c62015-04-23 09:33:17 -05007128 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
Robert Elliott943a7022015-04-23 09:34:32 -05007129 /*
7130 * call pci_disable_device before pci_release_regions per
7131 * Documentation/PCI/pci.txt
7132 */
Robert Elliott195f2c62015-04-23 09:33:17 -05007133 pci_disable_device(h->pdev); /* pci_init 1 */
Robert Elliott943a7022015-04-23 09:34:32 -05007134 pci_release_regions(h->pdev); /* pci_init 2 */
Robert Elliott195f2c62015-04-23 09:33:17 -05007135}
7136
7137/* several items must be freed later */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007138static int hpsa_pci_init(struct ctlr_info *h)
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007139{
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007140 int prod_index, err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007141
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007142 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7143 if (prod_index < 0)
Robert Elliott60f923b2015-01-23 16:42:06 -06007144 return prod_index;
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05007145 h->product_name = products[prod_index].product_name;
7146 h->access = *(products[prod_index].access);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007147
Stephen Cameron9b5c48c2015-04-23 09:32:06 -05007148 h->needs_abort_tags_swizzled =
7149 ctlr_needs_abort_tags_swizzled(h->board_id);
7150
Matthew Garrette5a44df2011-11-11 11:14:23 -05007151 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7152 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7153
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007154 err = pci_enable_device(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007155 if (err) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007156 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
Robert Elliott943a7022015-04-23 09:34:32 -05007157 pci_disable_device(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007158 return err;
7159 }
7160
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06007161 err = pci_request_regions(h->pdev, HPSA);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007162 if (err) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05007163 dev_err(&h->pdev->dev,
Robert Elliott195f2c62015-04-23 09:33:17 -05007164 "failed to obtain PCI resources\n");
Robert Elliott943a7022015-04-23 09:34:32 -05007165 pci_disable_device(h->pdev);
7166 return err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007167 }
Robert Elliott4fa604e2014-11-14 17:27:24 -06007168
7169 pci_set_master(h->pdev);
7170
Stephen M. Cameron6b3f4c52010-05-27 15:13:02 -05007171 hpsa_interrupt_mode(h);
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05007172 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05007173 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007174 goto clean2; /* intmode+region, pci */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007175 h->vaddr = remap_pci_mem(h->paddr, 0x250);
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007176 if (!h->vaddr) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007177 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007178 err = -ENOMEM;
Robert Elliott195f2c62015-04-23 09:33:17 -05007179 goto clean2; /* intmode+region, pci */
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007180 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06007181 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05007182 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007183 goto clean3; /* vaddr, intmode+region, pci */
Stephen M. Cameron77c44952010-05-27 15:13:17 -05007184 err = hpsa_find_cfgtables(h);
7185 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007186 goto clean3; /* vaddr, intmode+region, pci */
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05007187 hpsa_find_board_params(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007188
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05007189 if (!hpsa_CISS_signature_present(h)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007190 err = -ENODEV;
Robert Elliott195f2c62015-04-23 09:33:17 -05007191 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007192 }
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06007193 hpsa_set_driver_support_bits(h);
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05007194 hpsa_p600_dma_prefetch_quirk(h);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05007195 err = hpsa_enter_simple_mode(h);
7196 if (err)
Robert Elliott195f2c62015-04-23 09:33:17 -05007197 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007198 return 0;
7199
Robert Elliott195f2c62015-04-23 09:33:17 -05007200clean4: /* cfgtables, vaddr, intmode+region, pci */
7201 hpsa_free_cfgtables(h);
7202clean3: /* vaddr, intmode+region, pci */
7203 iounmap(h->vaddr);
Robert Elliott105a3db2015-04-23 09:33:48 -05007204 h->vaddr = NULL;
Robert Elliott195f2c62015-04-23 09:33:17 -05007205clean2: /* intmode+region, pci */
7206 hpsa_disable_interrupt_mode(h);
Robert Elliott943a7022015-04-23 09:34:32 -05007207 /*
7208 * call pci_disable_device before pci_release_regions per
7209 * Documentation/PCI/pci.txt
7210 */
Robert Elliott195f2c62015-04-23 09:33:17 -05007211 pci_disable_device(h->pdev);
Robert Elliott943a7022015-04-23 09:34:32 -05007212 pci_release_regions(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007213 return err;
7214}
7215
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007216static void hpsa_hba_inquiry(struct ctlr_info *h)
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06007217{
7218 int rc;
7219
7220#define HBA_INQUIRY_BYTE_COUNT 64
7221 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7222 if (!h->hba_inquiry_data)
7223 return;
7224 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7225 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7226 if (rc != 0) {
7227 kfree(h->hba_inquiry_data);
7228 h->hba_inquiry_data = NULL;
7229 }
7230}
7231
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007232static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007233{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007234 int rc, i;
Tomas Henzl3b747292015-01-23 16:41:20 -06007235 void __iomem *vaddr;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007236
7237 if (!reset_devices)
7238 return 0;
7239
Tomas Henzl132aa222014-08-14 16:12:39 +02007240	/* The kdump kernel is loading and we don't know what state the
7241	 * PCI interface is in. The dev->enable_cnt is equal to zero,
7242	 * so we call enable+disable, wait a while, and switch it on.
7243 */
7244 rc = pci_enable_device(pdev);
7245 if (rc) {
7246 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7247 return -ENODEV;
7248 }
7249 pci_disable_device(pdev);
7250 msleep(260); /* a randomly chosen number */
7251 rc = pci_enable_device(pdev);
7252 if (rc) {
7253 dev_warn(&pdev->dev, "failed to enable device.\n");
7254 return -ENODEV;
7255 }
Robert Elliott4fa604e2014-11-14 17:27:24 -06007256
Tomas Henzl859c75a2014-09-12 14:44:15 +02007257 pci_set_master(pdev);
Robert Elliott4fa604e2014-11-14 17:27:24 -06007258
Tomas Henzl3b747292015-01-23 16:41:20 -06007259 vaddr = pci_ioremap_bar(pdev, 0);
7260 if (vaddr == NULL) {
7261 rc = -ENOMEM;
7262 goto out_disable;
7263 }
7264 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7265 iounmap(vaddr);
7266
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007267 /* Reset the controller with a PCI power-cycle or via doorbell */
Tomas Henzl6b6c1cd2015-04-02 15:25:54 +02007268 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007269
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007270 /* -ENOTSUPP here means we cannot reset the controller
7271 * but it's already (and still) up and running in
Stephen M. Cameron18867652010-06-16 13:51:45 -05007272 * "performant mode". Or, it might be 640x, which can't reset
7273 * due to concerns about shared bbwc between 6402/6404 pair.
Stephen M. Cameron1df85522010-06-16 13:51:40 -05007274 */
Robert Elliottadf1b3a2015-01-23 16:42:01 -06007275 if (rc)
Tomas Henzl132aa222014-08-14 16:12:39 +02007276 goto out_disable;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007277
7278 /* Now try to get the controller to respond to a no-op */
Robert Elliott1ba66c92015-01-23 16:42:11 -06007279 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007280 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7281 if (hpsa_noop(pdev) == 0)
7282 break;
7283 else
7284 dev_warn(&pdev->dev, "no-op failed%s\n",
7285 (i < 11 ? "; re-trying" : ""));
7286 }
Tomas Henzl132aa222014-08-14 16:12:39 +02007287
7288out_disable:
7289
7290 pci_disable_device(pdev);
7291 return rc;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05007292}
7293
Robert Elliott1fb7c982015-04-23 09:33:22 -05007294static void hpsa_free_cmd_pool(struct ctlr_info *h)
7295{
7296 kfree(h->cmd_pool_bits);
Robert Elliott105a3db2015-04-23 09:33:48 -05007297 h->cmd_pool_bits = NULL;
7298 if (h->cmd_pool) {
Robert Elliott1fb7c982015-04-23 09:33:22 -05007299 pci_free_consistent(h->pdev,
7300 h->nr_cmds * sizeof(struct CommandList),
7301 h->cmd_pool,
7302 h->cmd_pool_dhandle);
Robert Elliott105a3db2015-04-23 09:33:48 -05007303 h->cmd_pool = NULL;
7304 h->cmd_pool_dhandle = 0;
7305 }
7306 if (h->errinfo_pool) {
Robert Elliott1fb7c982015-04-23 09:33:22 -05007307 pci_free_consistent(h->pdev,
7308 h->nr_cmds * sizeof(struct ErrorInfo),
7309 h->errinfo_pool,
7310 h->errinfo_pool_dhandle);
Robert Elliott105a3db2015-04-23 09:33:48 -05007311 h->errinfo_pool = NULL;
7312 h->errinfo_pool_dhandle = 0;
7313 }
Robert Elliott1fb7c982015-04-23 09:33:22 -05007314}
7315
Robert Elliottd37ffbe2015-04-23 09:32:27 -05007316static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007317{
7318 h->cmd_pool_bits = kzalloc(
7319 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7320 sizeof(unsigned long), GFP_KERNEL);
7321 h->cmd_pool = pci_alloc_consistent(h->pdev,
7322 h->nr_cmds * sizeof(*h->cmd_pool),
7323 &(h->cmd_pool_dhandle));
7324 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7325 h->nr_cmds * sizeof(*h->errinfo_pool),
7326 &(h->errinfo_pool_dhandle));
7327 if ((h->cmd_pool_bits == NULL)
7328 || (h->cmd_pool == NULL)
7329 || (h->errinfo_pool == NULL)) {
7330 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
Robert Elliott2c143342015-01-23 16:42:48 -06007331 goto clean_up;
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007332 }
Stephen Cameron360c73b2015-04-23 09:32:32 -05007333 hpsa_preinitialize_commands(h);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007334 return 0;
Robert Elliott2c143342015-01-23 16:42:48 -06007335clean_up:
7336 hpsa_free_cmd_pool(h);
7337 return -ENOMEM;
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007338}
7339
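/*
 * Worked example (illustration only): with nr_cmds = 1024 on a 64-bit
 * kernel, the bitmap allocated above is DIV_ROUND_UP(1024, 64) = 16
 * unsigned longs (128 bytes), alongside 1024 DMA-coherent
 * struct CommandList entries and 1024 matching struct ErrorInfo entries.
 */
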
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05007340static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7341{
Fabian Frederickec429952015-01-23 16:41:46 -06007342 int i, cpu;
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05007343
7344 cpu = cpumask_first(cpu_online_mask);
7345 for (i = 0; i < h->msix_vector; i++) {
Fabian Frederickec429952015-01-23 16:41:46 -06007346 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05007347 cpu = cpumask_next(cpu, cpu_online_mask);
7348 }
7349}
7350
Robert Elliottec501a12015-01-23 16:41:40 -06007351/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7352static void hpsa_free_irqs(struct ctlr_info *h)
7353{
7354 int i;
7355
7356 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7357 /* Single reply queue, only one irq to free */
7358 i = h->intr_mode;
7359 irq_set_affinity_hint(h->intr[i], NULL);
7360 free_irq(h->intr[i], &h->q[i]);
Robert Elliott105a3db2015-04-23 09:33:48 -05007361 h->q[i] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06007362 return;
7363 }
7364
7365 for (i = 0; i < h->msix_vector; i++) {
7366 irq_set_affinity_hint(h->intr[i], NULL);
7367 free_irq(h->intr[i], &h->q[i]);
Robert Elliott105a3db2015-04-23 09:33:48 -05007368 h->q[i] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06007369 }
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007370 for (; i < MAX_REPLY_QUEUES; i++)
7371 h->q[i] = 0;
Robert Elliottec501a12015-01-23 16:41:40 -06007372}
7373
Robert Elliott9ee61792015-01-23 16:42:32 -06007374/* returns 0 on success; cleans up and returns -Enn on error */
7375static int hpsa_request_irqs(struct ctlr_info *h,
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007376 irqreturn_t (*msixhandler)(int, void *),
7377 irqreturn_t (*intxhandler)(int, void *))
7378{
Matt Gates254f7962012-05-01 11:43:06 -05007379 int rc, i;
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007380
Matt Gates254f7962012-05-01 11:43:06 -05007381 /*
7382 * initialize h->q[x] = x so that interrupt handlers know which
7383 * queue to process.
7384 */
7385 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7386 h->q[i] = (u8) i;
7387
Hannes Reineckeeee0f032014-01-15 13:30:53 +01007388 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
Matt Gates254f7962012-05-01 11:43:06 -05007389 /* If performant mode and MSI-X, use multiple reply queues */
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007390 for (i = 0; i < h->msix_vector; i++) {
Robert Elliott8b470042015-04-23 09:34:58 -05007391 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
Matt Gates254f7962012-05-01 11:43:06 -05007392 rc = request_irq(h->intr[i], msixhandler,
Robert Elliott8b470042015-04-23 09:34:58 -05007393 0, h->intrname[i],
Matt Gates254f7962012-05-01 11:43:06 -05007394 &h->q[i]);
Robert Elliotta4e17fc2015-01-23 16:41:51 -06007395 if (rc) {
7396 int j;
7397
7398 dev_err(&h->pdev->dev,
7399 "failed to get irq %d for %s\n",
7400 h->intr[i], h->devname);
7401 for (j = 0; j < i; j++) {
7402 free_irq(h->intr[j], &h->q[j]);
7403 h->q[j] = 0;
7404 }
7405 for (; j < MAX_REPLY_QUEUES; j++)
7406 h->q[j] = 0;
7407 return rc;
7408 }
7409 }
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05007410 hpsa_irq_affinity_hints(h);
Matt Gates254f7962012-05-01 11:43:06 -05007411 } else {
7412 /* Use single reply pool */
Hannes Reineckeeee0f032014-01-15 13:30:53 +01007413 if (h->msix_vector > 0 || h->msi_vector) {
Robert Elliott8b470042015-04-23 09:34:58 -05007414 if (h->msix_vector)
7415 sprintf(h->intrname[h->intr_mode],
7416 "%s-msix", h->devname);
7417 else
7418 sprintf(h->intrname[h->intr_mode],
7419 "%s-msi", h->devname);
Matt Gates254f7962012-05-01 11:43:06 -05007420 rc = request_irq(h->intr[h->intr_mode],
Robert Elliott8b470042015-04-23 09:34:58 -05007421 msixhandler, 0,
7422 h->intrname[h->intr_mode],
Matt Gates254f7962012-05-01 11:43:06 -05007423 &h->q[h->intr_mode]);
7424 } else {
Robert Elliott8b470042015-04-23 09:34:58 -05007425 sprintf(h->intrname[h->intr_mode],
7426 "%s-intx", h->devname);
Matt Gates254f7962012-05-01 11:43:06 -05007427 rc = request_irq(h->intr[h->intr_mode],
Robert Elliott8b470042015-04-23 09:34:58 -05007428 intxhandler, IRQF_SHARED,
7429 h->intrname[h->intr_mode],
Matt Gates254f7962012-05-01 11:43:06 -05007430 &h->q[h->intr_mode]);
7431 }
Robert Elliott105a3db2015-04-23 09:33:48 -05007432 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
Matt Gates254f7962012-05-01 11:43:06 -05007433 }
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007434 if (rc) {
Robert Elliott195f2c62015-04-23 09:33:17 -05007435 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007436 h->intr[h->intr_mode], h->devname);
Robert Elliott195f2c62015-04-23 09:33:17 -05007437 hpsa_free_irqs(h);
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007438 return -ENODEV;
7439 }
7440 return 0;
7441}
7442
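/*
 * Illustration (no new code): each vector above is registered with
 * &h->q[i] as its dev_id cookie, and h->q[i] holds the value i, so a
 * handler such as do_hpsa_intr_msi() recovers its reply-queue index with
 * "*(u8 *) queue" and the owning controller with queue_to_hba().
 */
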
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007443static int hpsa_kdump_soft_reset(struct ctlr_info *h)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007444{
Robert Elliottbf43caf2015-04-23 09:33:38 -05007445 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007446
7447 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
7448 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
7449 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
7450 return -1;
7451 }
7452
7453 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
7454 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
7455 dev_warn(&h->pdev->dev, "Board failed to become ready "
7456 "after soft reset.\n");
7457 return -1;
7458 }
7459
7460 return 0;
7461}
7462
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007463static void hpsa_free_reply_queues(struct ctlr_info *h)
7464{
7465 int i;
7466
7467 for (i = 0; i < h->nreply_queues; i++) {
7468 if (!h->reply_queue[i].head)
7469 continue;
Robert Elliott1fb7c982015-04-23 09:33:22 -05007470 pci_free_consistent(h->pdev,
7471 h->reply_queue_size,
7472 h->reply_queue[i].head,
7473 h->reply_queue[i].busaddr);
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007474 h->reply_queue[i].head = NULL;
7475 h->reply_queue[i].busaddr = 0;
7476 }
Robert Elliott105a3db2015-04-23 09:33:48 -05007477 h->reply_queue_size = 0;
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007478}
7479
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_free_pci_init(h);			/* init_one 3 */
	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);				/* init_one 1 */
}

/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;
	int failcount = 0;

	flush_workqueue(h->resubmit_wq);	/* ensure all cmds are fully built */
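	/*
	 * Refcount probe: a free command slot idles at refcount 0, so a
	 * post-increment value above 1 below means the slot holds an
	 * allocated, possibly in-flight command; only those are completed
	 * with CMD_CTLR_LOCKUP.  The matching cmd_free() drops our reference.
	 */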
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
			finish_cmd(c);
			atomic_dec(&h->commands_outstanding);
			failcount++;
		}
		cmd_free(h, c);
	}
	dev_warn(&h->pdev->dev,
		"failed %d commands in fail_all\n", failcount);
}

static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int cpu;

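	/*
	 * lockup_detected is kept per-cpu so the hot I/O paths can read it
	 * locklessly on whatever CPU they happen to run on; this (rare)
	 * writer simply updates every online CPU's copy.
	 */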
	for_each_online_cpu(cpu) {
		u32 *lockup_detected;
		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected after %d seconds but scratchpad register is zero\n",
			h->heartbeat_sample_interval / HZ);
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
			lockup_detected, h->heartbeat_sample_interval / HZ);
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}

static int detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return true;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
	return false;
}

static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	int i;
	char *event_type;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return;

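	/*
	 * Acknowledge handshake, as implemented below: write the event bits
	 * back to clear_event_notify, ring bit 6 (DOORBELL_CLEAR_EVENTS) in
	 * the doorbell register, then poll until the controller drops that
	 * bit again.  For ioaccel path-change events, block and drain
	 * accelerated I/O first so nothing is in flight on the old paths
	 * while the firmware reconfigures.
	 */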
	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		for (i = 0; i < h->ndevices; i++)
			h->dev[i]->offload_enabled = 0;
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
#if 0
		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_mode_change_ack(h);
#endif
	}
	return;
}

/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}

/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

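	/*
	 * The spinlock is dropped around hpsa_volume_offline() below because
	 * that call sends commands to the controller and may sleep; once an
	 * entry is found ready we re-take the lock, unlink it, and return.
	 */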
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr)) {
			spin_lock_irqsave(&h->offline_device_lock, flags);
			list_del(&d->offline_list);
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return 1;
		}
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}

static void hpsa_rescan_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, rescan_ctlr_work);

	if (h->remove_in_progress)
		return;

	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
		scsi_host_get(h->scsi_host);
		hpsa_ack_ctlr_events(h);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);
	}
	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);

	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	struct workqueue_struct *wq = NULL;

	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
	if (!wq)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

	return wq;
}

static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;
	u32 board_id;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}

	rc = hpsa_init_reset_devices(pdev, board_id);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
	atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lockup, aer/h */

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* pci, lockup, aer/h */
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* pci, lockup, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, pci, lockup, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, pci, lockup, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->abort_cmd_wait_queue);
	init_waitqueue_head(&h->abort_sync_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->hba_mode_enabled = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6;	/* sg, cmd, irq, pci, lockup, aer/h */

	/* create the resubmit workqueue */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}

	/*
	 * At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross. We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid. So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time. Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			/*
			 * cannot goto clean7 or free_irqs will be called
			 * again. Instead, do its work
			 */
			hpsa_free_performant_mode(h);	/* clean7 */
			hpsa_free_sg_chain_blocks(h);	/* clean6 */
			hpsa_free_cmd_pool(h);		/* clean5 */
			/*
			 * skip hpsa_free_irqs(h) clean4 since that
			 * was just called before request_irqs failed
			 */
			goto clean3;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean8;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything. Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	rc = hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
	if (rc)
		goto clean8;	/* wq, perf, sg, cmd, irq, pci, lockup, aer/h */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

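	/*
	 * Error unwinding: each clean* label below undoes one init_one step
	 * and falls through to the next, so a failure at step N releases
	 * steps N-1 down to 1 in reverse order of allocation.
	 */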
clean8:	/* perf, sg, cmd, irq, pci, lockup, aer/h */
	kfree(h->hba_inquiry_data);
clean7:	/* perf, sg, cmd, irq, pci, lockup, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6:	/* sg, cmd, irq, pci, lockup, wq/aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5:	/* cmd, irq, pci, lockup, aer/h */
	hpsa_free_cmd_pool(h);
clean4:	/* irq, pci, lockup, aer/h */
	hpsa_free_irqs(h);
clean3:	/* pci, lockup, aer/h */
	hpsa_free_pci_init(h);
clean2:	/* lockup, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

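	/*
	 * Note: the result of cmd_alloc() is used without a NULL check;
	 * the allocator is assumed never to fail on this path.
	 */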
	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_TODEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command:
	 * sendcmd will turn off interrupts and send the flush,
	 * writing all data in the battery-backed cache out to disk.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}

static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);

	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_shutdown(pdev);

	hpsa_free_device_info(h);		/* scan */

	hpsa_unregister_scsi(h);		/* init_one 9 */
	kfree(h->hba_inquiry_data);		/* init_one 9 */
	h->hba_inquiry_data = NULL;		/* init_one 9 */
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);			/* init_one 3 */

	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */
	kfree(h);				/* init_one 1 */
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers. The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands. This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes. The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}

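/*
 * Illustrative example (not from the original source): with min_blocks = 4
 * and bucket[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}, a command
 * carrying 3 SG entries needs 3 + 4 = 7 sixteen-byte blocks.  The first
 * bucket holding at least 7 blocks is bucket[2] = 8, so bucket_map[3] = 2,
 * and the controller will DMA 8 * 16 = 128 bytes to fetch that command.
 */
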
/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated. There are 8 registers on
	 * the controller which we write to tell it 8 different
	 * sizes of commands there may be. It's a way of
	 * reducing the DMA done to fetch each command. Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within. The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes. The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks;
	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks. Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements. bft[] contains the eight values we write to
	 * the registers. They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

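	/*
	 * From here on, the eight BlockFetch registers tell the controller
	 * how many 16-byte blocks to DMA for each of the eight size codes
	 * that can appear in a command tag (see calc_bucket_map() above).
	 */
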
	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}

/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}

/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}

/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}

/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}

/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);
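	/*
	 * Each reply-queue element is a single 8-byte tag, so one queue
	 * occupies max_commands * sizeof(u64) bytes of DMA-coherent memory.
	 */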

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

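	/*
	 * Poll the command pool until no allocated command is an ioaccel
	 * command, using the same refcount probe as
	 * fail_all_outstanding_cmds(): a post-increment refcount above 1
	 * means the slot is currently allocated.
	 */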
	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one of
 * our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

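/*
 * verify_offsets() is never called; it exists purely for its
 * BUILD_BUG_ON()s, which fail the build at compile time if any member of
 * the hardware command structures drifts from the byte offset the
 * controller firmware expects.  The unused attribute keeps the compiler
 * quiet about the uncalled function while still type-checking the body.
 */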
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);