/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
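/* For example, board_id 0x3241103C in the products[] table below is the
 * subsystem device ID 0x3241 from the PCI table above combined with the
 * subsystem vendor ID 0x103C (HP), i.e. (subsys_device_id << 16) |
 * subsys_vendor_id (composition assumed from hpsa_lookup_board_id(),
 * declared further down).
 */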
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
static void lock_and_start_io(struct ctlr_info *h);
static void start_io(struct ctlr_info *h, unsigned long *flags);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
#define VPD_PAGE (1 << 8)
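/* VPD_PAGE is OR'd into fill_cmd()'s 16-bit page_code argument: bits 0-7
 * carry the inquiry page number and bit 8 flags that an EVPD (vital
 * product data) page rather than standard inquiry data is wanted
 * (interpretation assumed from fill_cmd(), defined later in this file).
 */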

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: LUN failure detected\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: report LUN data changed\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
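
/* Typical usage from userspace (illustrative; the host number varies):
 *   echo 1 > /sys/class/scsi_host/host0/hp_ssd_smart_path_status
 * enables ioaccel dispatch, and echo 0 disables it.
 */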

static ssize_t host_store_raid_offload_debug(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* SmartArray P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
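
/* Per the controller's 8-byte LUN addressing convention, bits 7:6 of
 * byte 3 encode the address mode; 0x40 means "logical volume" mode.
 * So, for example, an address with scsi3addr[3] == 0x40 is a logical
 * drive, while 0x00 (peripheral mode) is a physical device.
 */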

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
	host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
	host_show_hp_ssd_smart_path_status,
	host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
	host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.name = HPSA,
	.proc_name = HPSA,
	.queuecommand = hpsa_scsi_queue_command,
	.scan_start = hpsa_scan_start,
	.scan_finished = hpsa_scan_finished,
	.change_queue_depth = hpsa_change_queue_depth,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_abort_handler = hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl = hpsa_ioctl,
	.slave_alloc = hpsa_slave_alloc,
	.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long flags;

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

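/* Worked example of the reply ring above: with h->max_commands == 256,
 * after consuming entry 255 current_entry resets to 0 and the expected
 * "wraparound" toggle bit flips, so stale entries from the previous pass
 * (whose low bit still carries the old toggle value) read as FIFO_EMPTY
 * until the controller overwrites them.
 */
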
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

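/* For instance, if blockFetchTable[c->Header.SGList] is 3, the statement
 * above yields busaddr |= 0x07: bit 0 selects performant ("pull") mode
 * and bits 1-3 carry the block fetch count of 3.
 */
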
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
	struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
		IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
	struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
	struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
	struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h, &flags);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *device,
	struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
928 if (device->scsi3addr[4] == 0) {
929 /* This is not a non-zero lun of a multi-lun device */
930 if (hpsa_find_target_lun(h, device->scsi3addr,
931 device->bus, &device->target, &device->lun) != 0)
932 return -1;
933 goto lun_assigned;
934 }
935
936 /* This is a non-zero lun of a multi-lun device.
937 * Search through our list and find the device which
938 * has the same 8 byte LUN address, excepting byte 4.
939 * Assign the same bus and target for this new LUN.
940 * Use the logical unit number from the firmware.
941 */
942 memcpy(addr1, device->scsi3addr, 8);
943 addr1[4] = 0;
944 for (i = 0; i < n; i++) {
945 sd = h->dev[i];
946 memcpy(addr2, sd->scsi3addr, 8);
947 addr2[4] = 0;
948 /* differ only in byte 4? */
949 if (memcmp(addr1, addr2, 8) == 0) {
950 device->bus = sd->bus;
951 device->target = sd->target;
952 device->lun = device->scsi3addr[4];
953 break;
954 }
955 }
956 if (device->lun == -1) {
957 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
958 " suspect firmware bug or unsupported hardware "
959 "configuration.\n");
960 return -1;
961 }
962
963lun_assigned:
964
965 h->dev[n] = device;
966 h->ndevices++;
967 added[*nadded] = device;
968 (*nadded)++;
969
970 /* initially, (before registering with scsi layer) we don't
971 * know our hostno and we don't want to print anything first
972 * time anyway (the scsi layer's inquiries will show that info)
973 */
974 /* if (hostno != -1) */
975 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
976 scsi_device_type(device->devtype), hostno,
977 device->bus, device->target, device->lun);
978 return 0;
979}
980
Scott Teelbd9244f2012-01-19 14:01:30 -0600981/* Update an entry in h->dev[] array. */
982static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
983 int entry, struct hpsa_scsi_dev_t *new_entry)
984{
985 /* assumes h->devlock is held */
986 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
987
988 /* Raid level changed. */
989 h->dev[entry]->raid_level = new_entry->raid_level;
Stephen M. Cameron250fb122014-02-18 13:55:38 -0600990
991 /* Raid offload parameters changed. */
992 h->dev[entry]->offload_config = new_entry->offload_config;
993 h->dev[entry]->offload_enabled = new_entry->offload_enabled;
Stephen M. Cameron9fb0de22014-02-18 13:56:50 -0600994 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
995 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
996 h->dev[entry]->raid_map = new_entry->raid_map;
Stephen M. Cameron250fb122014-02-18 13:55:38 -0600997
Scott Teelbd9244f2012-01-19 14:01:30 -0600998 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
999 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
1000 new_entry->target, new_entry->lun);
1001}
1002
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001003/* Replace an entry from h->dev[] array. */
1004static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
1005 int entry, struct hpsa_scsi_dev_t *new_entry,
1006 struct hpsa_scsi_dev_t *added[], int *nadded,
1007 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1008{
1009 /* assumes h->devlock is held */
Scott Teelcfe5bad2011-10-26 16:21:07 -05001010 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001011 removed[*nremoved] = h->dev[entry];
1012 (*nremoved)++;
Stephen M. Cameron01350d02011-08-09 08:18:01 -05001013
1014 /*
1015 * New physical devices won't have target/lun assigned yet
1016 * so we need to preserve the values in the slot we are replacing.
1017 */
1018 if (new_entry->target == -1) {
1019 new_entry->target = h->dev[entry]->target;
1020 new_entry->lun = h->dev[entry]->lun;
1021 }
1022
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001023 h->dev[entry] = new_entry;
1024 added[*nadded] = new_entry;
1025 (*nadded)++;
1026 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
1027 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
1028 new_entry->target, new_entry->lun);
1029}
1030
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001031/* Remove an entry from h->dev[] array. */
1032static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
1033 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1034{
1035 /* assumes h->devlock is held */
1036 int i;
1037 struct hpsa_scsi_dev_t *sd;
1038
Scott Teelcfe5bad2011-10-26 16:21:07 -05001039 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001040
1041 sd = h->dev[entry];
1042 removed[*nremoved] = h->dev[entry];
1043 (*nremoved)++;
1044
1045 for (i = entry; i < h->ndevices-1; i++)
1046 h->dev[i] = h->dev[i+1];
1047 h->ndevices--;
1048 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
1049 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
1050 sd->lun);
1051}
1052
1053#define SCSI3ADDR_EQ(a, b) ( \
1054 (a)[7] == (b)[7] && \
1055 (a)[6] == (b)[6] && \
1056 (a)[5] == (b)[5] && \
1057 (a)[4] == (b)[4] && \
1058 (a)[3] == (b)[3] && \
1059 (a)[2] == (b)[2] && \
1060 (a)[1] == (b)[1] && \
1061 (a)[0] == (b)[0])
1062
1063static void fixup_botched_add(struct ctlr_info *h,
1064 struct hpsa_scsi_dev_t *added)
1065{
1066 /* called when scsi_add_device fails in order to re-adjust
1067 * h->dev[] to match the mid layer's view.
1068 */
1069 unsigned long flags;
1070 int i, j;
1071
1072 spin_lock_irqsave(&h->lock, flags);
1073 for (i = 0; i < h->ndevices; i++) {
1074 if (h->dev[i] == added) {
1075 for (j = i; j < h->ndevices-1; j++)
1076 h->dev[j] = h->dev[j+1];
1077 h->ndevices--;
1078 break;
1079 }
1080 }
1081 spin_unlock_irqrestore(&h->lock, flags);
1082 kfree(added);
1083}
1084
1085static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1086 struct hpsa_scsi_dev_t *dev2)
1087{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001088 /* we compare everything except lun and target as these
1089 * are not yet assigned. Compare parts likely
1090 * to differ first
1091 */
1092 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1093 sizeof(dev1->scsi3addr)) != 0)
1094 return 0;
1095 if (memcmp(dev1->device_id, dev2->device_id,
1096 sizeof(dev1->device_id)) != 0)
1097 return 0;
1098 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1099 return 0;
1100 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1101 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001102 if (dev1->devtype != dev2->devtype)
1103 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001104 if (dev1->bus != dev2->bus)
1105 return 0;
1106 return 1;
1107}
1108
Scott Teelbd9244f2012-01-19 14:01:30 -06001109static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1110 struct hpsa_scsi_dev_t *dev2)
1111{
1112 /* Device attributes that can change, but don't mean
1113 * that the device is a different device, nor that the OS
1114 * needs to be told anything about the change.
1115 */
1116 if (dev1->raid_level != dev2->raid_level)
1117 return 1;
Stephen M. Cameron250fb122014-02-18 13:55:38 -06001118 if (dev1->offload_config != dev2->offload_config)
1119 return 1;
1120 if (dev1->offload_enabled != dev2->offload_enabled)
1121 return 1;
Scott Teelbd9244f2012-01-19 14:01:30 -06001122 return 0;
1123}
1124
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001125/* Find needle in haystack. If exact match found, return DEVICE_SAME,
1126 * and return needle location in *index. If scsi3addr matches, but not
1127 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
Scott Teelbd9244f2012-01-19 14:01:30 -06001128 * location in *index.
1129 * In the case of a minor device attribute change, such as RAID level, just
1130 * return DEVICE_UPDATED, along with the updated device's location in index.
1131 * If needle not found, return DEVICE_NOT_FOUND.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001132 */
1133static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1134 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1135 int *index)
1136{
1137 int i;
1138#define DEVICE_NOT_FOUND 0
1139#define DEVICE_CHANGED 1
1140#define DEVICE_SAME 2
Scott Teelbd9244f2012-01-19 14:01:30 -06001141#define DEVICE_UPDATED 3
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001142 for (i = 0; i < haystack_size; i++) {
Stephen M. Cameron23231042010-02-04 08:43:36 -06001143 if (haystack[i] == NULL) /* previously removed. */
1144 continue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001145 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1146 *index = i;
Scott Teelbd9244f2012-01-19 14:01:30 -06001147 if (device_is_the_same(needle, haystack[i])) {
1148 if (device_updated(needle, haystack[i]))
1149 return DEVICE_UPDATED;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001150 return DEVICE_SAME;
Scott Teelbd9244f2012-01-19 14:01:30 -06001151 } else {
Stephen M. Cameron98465902014-02-21 16:25:00 -06001152 /* Keep offline devices offline */
1153 if (needle->volume_offline)
1154 return DEVICE_NOT_FOUND;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001155 return DEVICE_CHANGED;
Scott Teelbd9244f2012-01-19 14:01:30 -06001156 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001157 }
1158 }
1159 *index = -1;
1160 return DEVICE_NOT_FOUND;
1161}
1162
Stephen M. Cameron98465902014-02-21 16:25:00 -06001163static void hpsa_monitor_offline_device(struct ctlr_info *h,
1164 unsigned char scsi3addr[])
1165{
1166 struct offline_device_entry *device;
1167 unsigned long flags;
1168
1169 /* Check to see if device is already on the list */
1170 spin_lock_irqsave(&h->offline_device_lock, flags);
1171 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1172 if (memcmp(device->scsi3addr, scsi3addr,
1173 sizeof(device->scsi3addr)) == 0) {
1174 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1175 return;
1176 }
1177 }
1178 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1179
1180 /* Device is not on the list, add it. */
1181 device = kmalloc(sizeof(*device), GFP_KERNEL);
1182 if (!device) {
1183 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1184 return;
1185 }
1186 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1187 spin_lock_irqsave(&h->offline_device_lock, flags);
1188 list_add_tail(&device->offline_list, &h->offline_device_list);
1189 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1190}
1191
1192/* Print a message explaining various offline volume states */
1193static void hpsa_show_volume_status(struct ctlr_info *h,
1194 struct hpsa_scsi_dev_t *sd)
1195{
1196 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1197 dev_info(&h->pdev->dev,
1198 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1199 h->scsi_host->host_no,
1200 sd->bus, sd->target, sd->lun);
1201 switch (sd->volume_offline) {
1202 case HPSA_LV_OK:
1203 break;
1204 case HPSA_LV_UNDERGOING_ERASE:
1205 dev_info(&h->pdev->dev,
1206 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1207 h->scsi_host->host_no,
1208 sd->bus, sd->target, sd->lun);
1209 break;
1210 case HPSA_LV_UNDERGOING_RPI:
1211 dev_info(&h->pdev->dev,
1212 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1213 h->scsi_host->host_no,
1214 sd->bus, sd->target, sd->lun);
1215 break;
1216 case HPSA_LV_PENDING_RPI:
1217 dev_info(&h->pdev->dev,
1218 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1219 h->scsi_host->host_no,
1220 sd->bus, sd->target, sd->lun);
1221 break;
1222 case HPSA_LV_ENCRYPTED_NO_KEY:
1223 dev_info(&h->pdev->dev,
1224 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1225 h->scsi_host->host_no,
1226 sd->bus, sd->target, sd->lun);
1227 break;
1228 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1229 dev_info(&h->pdev->dev,
1230 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1231 h->scsi_host->host_no,
1232 sd->bus, sd->target, sd->lun);
1233 break;
1234 case HPSA_LV_UNDERGOING_ENCRYPTION:
1235 dev_info(&h->pdev->dev,
1236 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1237 h->scsi_host->host_no,
1238 sd->bus, sd->target, sd->lun);
1239 break;
1240 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1241 dev_info(&h->pdev->dev,
1242 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1243 h->scsi_host->host_no,
1244 sd->bus, sd->target, sd->lun);
1245 break;
1246 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1247 dev_info(&h->pdev->dev,
1248 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1249 h->scsi_host->host_no,
1250 sd->bus, sd->target, sd->lun);
1251 break;
1252 case HPSA_LV_PENDING_ENCRYPTION:
1253 dev_info(&h->pdev->dev,
1254 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1255 h->scsi_host->host_no,
1256 sd->bus, sd->target, sd->lun);
1257 break;
1258 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1259 dev_info(&h->pdev->dev,
1260 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1261 h->scsi_host->host_no,
1262 sd->bus, sd->target, sd->lun);
1263 break;
1264 }
1265}
1266
Stephen M. Cameron4967bd32010-02-04 08:41:49 -06001267static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001268 struct hpsa_scsi_dev_t *sd[], int nsds)
1269{
1270 /* sd contains scsi3 addresses and devtypes, and inquiry
1271 * data. This function takes what's in sd to be the current
1272 * reality and updates h->dev[] to reflect that reality.
1273 */
1274 int i, entry, device_change, changes = 0;
1275 struct hpsa_scsi_dev_t *csd;
1276 unsigned long flags;
1277 struct hpsa_scsi_dev_t **added, **removed;
1278 int nadded, nremoved;
1279 struct Scsi_Host *sh = NULL;
1280
Scott Teelcfe5bad2011-10-26 16:21:07 -05001281 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1282 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001283
1284 if (!added || !removed) {
1285 dev_warn(&h->pdev->dev, "out of memory in "
1286 "adjust_hpsa_scsi_table\n");
1287 goto free_and_out;
1288 }
1289
1290 spin_lock_irqsave(&h->devlock, flags);
1291
1292 /* find any devices in h->dev[] that are not in
1293 * sd[] and remove them from h->dev[], and for any
1294 * devices which have changed, remove the old device
1295 * info and add the new device info.
Scott Teelbd9244f2012-01-19 14:01:30 -06001296 * If minor device attributes change, just update
1297 * the existing device structure.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001298 */
1299 i = 0;
1300 nremoved = 0;
1301 nadded = 0;
1302 while (i < h->ndevices) {
1303 csd = h->dev[i];
1304 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1305 if (device_change == DEVICE_NOT_FOUND) {
1306 changes++;
1307 hpsa_scsi_remove_entry(h, hostno, i,
1308 removed, &nremoved);
1309 continue; /* remove ^^^, hence i not incremented */
1310 } else if (device_change == DEVICE_CHANGED) {
1311 changes++;
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001312 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1313 added, &nadded, removed, &nremoved);
Stephen M. Cameronc7f172d2010-02-04 08:43:31 -06001314 /* Set it to NULL to prevent it from being freed
1315 * at the bottom of hpsa_update_scsi_devices()
1316 */
1317 sd[entry] = NULL;
Scott Teelbd9244f2012-01-19 14:01:30 -06001318 } else if (device_change == DEVICE_UPDATED) {
1319 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001320 }
1321 i++;
1322 }
1323
1324 /* Now, make sure every device listed in sd[] is also
1325 * listed in h->dev[], adding them if they aren't found
1326 */
1327
1328 for (i = 0; i < nsds; i++) {
1329 if (!sd[i]) /* if already added above. */
1330 continue;
Stephen M. Cameron98465902014-02-21 16:25:00 -06001331
1332 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1333 * as the SCSI mid-layer does not handle such devices well.
1334 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1335 * at 160Hz, and prevents the system from coming up.
1336 */
1337 if (sd[i]->volume_offline) {
1338 hpsa_show_volume_status(h, sd[i]);
1339 dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
1340 h->scsi_host->host_no,
1341 sd[i]->bus, sd[i]->target, sd[i]->lun);
1342 continue;
1343 }
1344
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001345 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1346 h->ndevices, &entry);
1347 if (device_change == DEVICE_NOT_FOUND) {
1348 changes++;
1349 if (hpsa_scsi_add_entry(h, hostno, sd[i],
1350 added, &nadded) != 0)
1351 break;
1352 sd[i] = NULL; /* prevent from being freed later. */
1353 } else if (device_change == DEVICE_CHANGED) {
1354 /* should never happen... */
1355 changes++;
1356 dev_warn(&h->pdev->dev,
1357 "device unexpectedly changed.\n");
1358 /* but if it does happen, we just ignore that device */
1359 }
1360 }
1361 spin_unlock_irqrestore(&h->devlock, flags);
1362
Stephen M. Cameron98465902014-02-21 16:25:00 -06001363 /* Monitor devices which are in one of several NOT READY states to be
1364 * brought online later. This must be done without holding h->devlock,
1365 * so don't touch h->dev[]
1366 */
1367 for (i = 0; i < nsds; i++) {
1368 if (!sd[i]) /* if already added above. */
1369 continue;
1370 if (sd[i]->volume_offline)
1371 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1372 }
1373
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001374 /* Don't notify scsi mid layer of any changes the first time through
1375 * (or if there are no changes) scsi_scan_host will do it later the
1376 * first time through.
1377 */
1378 if (hostno == -1 || !changes)
1379 goto free_and_out;
1380
1381 sh = h->scsi_host;
1382 /* Notify scsi mid layer of any removed devices */
1383 for (i = 0; i < nremoved; i++) {
1384 struct scsi_device *sdev =
1385 scsi_device_lookup(sh, removed[i]->bus,
1386 removed[i]->target, removed[i]->lun);
1387 if (sdev != NULL) {
1388 scsi_remove_device(sdev);
1389 scsi_device_put(sdev);
1390 } else {
1391			/* We don't expect to get here.
1392			 * Future commands to this device will get a
1393			 * selection timeout as if the device were gone.
1394 */
1395 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
1396 " for removal.", hostno, removed[i]->bus,
1397 removed[i]->target, removed[i]->lun);
1398 }
1399 kfree(removed[i]);
1400 removed[i] = NULL;
1401 }
1402
1403 /* Notify scsi mid layer of any added devices */
1404 for (i = 0; i < nadded; i++) {
1405 if (scsi_add_device(sh, added[i]->bus,
1406 added[i]->target, added[i]->lun) == 0)
1407 continue;
1408 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
1409 "device not added.\n", hostno, added[i]->bus,
1410 added[i]->target, added[i]->lun);
1411 /* now we have to remove it from h->dev,
1412 * since it didn't get added to scsi mid layer
1413 */
1414 fixup_botched_add(h, added[i]);
1415 }
1416
1417free_and_out:
1418 kfree(added);
1419 kfree(removed);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001420}
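
/* Example of the reconcile above (illustrative only): if h->dev[] holds
 * {A, B, C} and a rescan yields sd[] = {A, C', D}, where C' is a
 * different device at C's address, the first pass removes B and replaces
 * C with C', the second pass adds D, and the SCSI midlayer is then told
 * to drop {B, C} and probe {C', D}.
 */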
1421
1422/*
Joe Perches9e03aa22013-09-03 13:45:58 -07001423 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001424 * Assumes h->devlock is held.
1425 */
1426static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1427 int bus, int target, int lun)
1428{
1429 int i;
1430 struct hpsa_scsi_dev_t *sd;
1431
1432 for (i = 0; i < h->ndevices; i++) {
1433 sd = h->dev[i];
1434 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1435 return sd;
1436 }
1437 return NULL;
1438}
1439
1440/* link sdev->hostdata to our per-device structure. */
1441static int hpsa_slave_alloc(struct scsi_device *sdev)
1442{
1443 struct hpsa_scsi_dev_t *sd;
1444 unsigned long flags;
1445 struct ctlr_info *h;
1446
1447 h = sdev_to_hba(sdev);
1448 spin_lock_irqsave(&h->devlock, flags);
1449 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1450 sdev_id(sdev), sdev->lun);
1451 if (sd != NULL)
1452 sdev->hostdata = sd;
1453 spin_unlock_irqrestore(&h->devlock, flags);
1454 return 0;
1455}
1456
1457static void hpsa_slave_destroy(struct scsi_device *sdev)
1458{
Stephen M. Cameronbcc44252010-02-04 08:41:54 -06001459 /* nothing to do. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001460}
1461
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001462static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1463{
1464 int i;
1465
1466 if (!h->cmd_sg_list)
1467 return;
1468 for (i = 0; i < h->nr_cmds; i++) {
1469 kfree(h->cmd_sg_list[i]);
1470 h->cmd_sg_list[i] = NULL;
1471 }
1472 kfree(h->cmd_sg_list);
1473 h->cmd_sg_list = NULL;
1474}
1475
1476static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1477{
1478 int i;
1479
1480 if (h->chainsize <= 0)
1481 return 0;
1482
1483 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1484 GFP_KERNEL);
1485 if (!h->cmd_sg_list)
1486 return -ENOMEM;
1487 for (i = 0; i < h->nr_cmds; i++) {
1488 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1489 h->chainsize, GFP_KERNEL);
1490 if (!h->cmd_sg_list[i])
1491 goto clean;
1492 }
1493 return 0;
1494
1495clean:
1496 hpsa_free_sg_chain_blocks(h);
1497 return -ENOMEM;
1498}
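
/* Sizing note (illustrative): every command gets a chain block able to
 * hold h->chainsize SG descriptors, so total chain memory is about
 * nr_cmds * chainsize * sizeof(struct SGDescriptor); e.g. with
 * nr_cmds = 1024, chainsize = 31, and 16-byte descriptors, that is
 * 1024 * 31 * 16 = 507904 bytes (~496 KiB).
 */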
1499
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001500static int hpsa_map_sg_chain_block(struct ctlr_info *h,
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001501 struct CommandList *c)
1502{
1503 struct SGDescriptor *chain_sg, *chain_block;
1504 u64 temp64;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001505 u32 chain_len;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001506
1507 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1508 chain_block = h->cmd_sg_list[c->cmdindex];
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001509 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1510 chain_len = sizeof(*chain_sg) *
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001511 (c->Header.SGTotal - h->max_cmd_sg_entries);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001512 chain_sg->Len = cpu_to_le32(chain_len);
1513 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001514 PCI_DMA_TODEVICE);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001515 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1516 /* prevent subsequent unmapping */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001517 chain_sg->Addr = cpu_to_le64(0);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001518 return -1;
1519 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001520 chain_sg->Addr = cpu_to_le64(temp64);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001521 return 0;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001522}
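
/* Chain layout sketch: the last of the h->max_cmd_sg_entries embedded
 * descriptors is repurposed as an HPSA_SG_CHAIN pointer to the chain
 * block, and Header.SGTotal counts that chain descriptor too (the
 * SGTotal <= max_cmd_sg_entries test in the unmap path below relies on
 * this).  E.g. with max_cmd_sg_entries = 32 and a 40-element
 * scatterlist: 31 entries sit in the command, slot 32 points at the
 * chain block, and the chain block holds the other 9 (SGTotal = 41).
 */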
1523
1524static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1525 struct CommandList *c)
1526{
1527 struct SGDescriptor *chain_sg;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001528
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001529 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001530 return;
1531
1532 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001533 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1534 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001535}
1536
Scott Teela09c1442014-02-18 13:57:21 -06001537
1538/* Decode the various types of errors on ioaccel2 path.
1539 * Return 1 for any error that should generate a RAID path retry.
1540 * Return 0 for errors that don't require a RAID path retry.
1541 */
1542static int handle_ioaccel_mode2_error(struct ctlr_info *h,
Scott Teelc3497752014-02-18 13:56:34 -06001543 struct CommandList *c,
1544 struct scsi_cmnd *cmd,
1545 struct io_accel2_cmd *c2)
1546{
1547 int data_len;
Scott Teela09c1442014-02-18 13:57:21 -06001548 int retry = 0;
Scott Teelc3497752014-02-18 13:56:34 -06001549
1550 switch (c2->error_data.serv_response) {
1551 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1552 switch (c2->error_data.status) {
1553 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1554 break;
1555 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1556 dev_warn(&h->pdev->dev,
1557 "%s: task complete with check condition.\n",
1558 "HP SSD Smart Path");
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001559 cmd->result |= SAM_STAT_CHECK_CONDITION;
Scott Teelc3497752014-02-18 13:56:34 -06001560 if (c2->error_data.data_present !=
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001561 IOACCEL2_SENSE_DATA_PRESENT) {
1562 memset(cmd->sense_buffer, 0,
1563 SCSI_SENSE_BUFFERSIZE);
Scott Teelc3497752014-02-18 13:56:34 -06001564 break;
Stephen M. Cameronee6b1882014-05-29 10:53:54 -05001565 }
Scott Teelc3497752014-02-18 13:56:34 -06001566 /* copy the sense data */
1567 data_len = c2->error_data.sense_data_len;
1568 if (data_len > SCSI_SENSE_BUFFERSIZE)
1569 data_len = SCSI_SENSE_BUFFERSIZE;
1570 if (data_len > sizeof(c2->error_data.sense_data_buff))
1571 data_len =
1572 sizeof(c2->error_data.sense_data_buff);
1573 memcpy(cmd->sense_buffer,
1574 c2->error_data.sense_data_buff, data_len);
Scott Teela09c1442014-02-18 13:57:21 -06001575 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001576 break;
1577 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1578 dev_warn(&h->pdev->dev,
1579 "%s: task complete with BUSY status.\n",
1580 "HP SSD Smart Path");
Scott Teela09c1442014-02-18 13:57:21 -06001581 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001582 break;
1583 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1584 dev_warn(&h->pdev->dev,
1585 "%s: task complete with reservation conflict.\n",
1586 "HP SSD Smart Path");
Scott Teela09c1442014-02-18 13:57:21 -06001587 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001588 break;
1589 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1590 /* Make scsi midlayer do unlimited retries */
1591 cmd->result = DID_IMM_RETRY << 16;
1592 break;
1593 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1594 dev_warn(&h->pdev->dev,
1595 "%s: task complete with aborted status.\n",
1596 "HP SSD Smart Path");
Scott Teela09c1442014-02-18 13:57:21 -06001597 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001598 break;
1599 default:
1600 dev_warn(&h->pdev->dev,
1601 "%s: task complete with unrecognized status: 0x%02x\n",
1602 "HP SSD Smart Path", c2->error_data.status);
Scott Teela09c1442014-02-18 13:57:21 -06001603 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001604 break;
1605 }
1606 break;
1607 case IOACCEL2_SERV_RESPONSE_FAILURE:
1608 /* don't expect to get here. */
1609 dev_warn(&h->pdev->dev,
1610 "unexpected delivery or target failure, status = 0x%02x\n",
1611 c2->error_data.status);
Scott Teela09c1442014-02-18 13:57:21 -06001612 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001613 break;
1614 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1615 break;
1616 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1617 break;
1618 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1619 dev_warn(&h->pdev->dev, "task management function rejected.\n");
Scott Teela09c1442014-02-18 13:57:21 -06001620 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001621 break;
1622 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1623 dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
1624 break;
1625 default:
1626 dev_warn(&h->pdev->dev,
1627 "%s: Unrecognized server response: 0x%02x\n",
Scott Teela09c1442014-02-18 13:57:21 -06001628 "HP SSD Smart Path",
1629 c2->error_data.serv_response);
1630 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001631 break;
1632 }
Scott Teela09c1442014-02-18 13:57:21 -06001633
1634 return retry; /* retry on raid path? */
Scott Teelc3497752014-02-18 13:56:34 -06001635}
1636
1637static void process_ioaccel2_completion(struct ctlr_info *h,
1638 struct CommandList *c, struct scsi_cmnd *cmd,
1639 struct hpsa_scsi_dev_t *dev)
1640{
1641 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
Scott Teela09c1442014-02-18 13:57:21 -06001642 int raid_retry = 0;
Scott Teelc3497752014-02-18 13:56:34 -06001643
1644 /* check for good status */
1645 if (likely(c2->error_data.serv_response == 0 &&
1646 c2->error_data.status == 0)) {
1647 cmd_free(h, c);
1648 cmd->scsi_done(cmd);
1649 return;
1650 }
1651
1652 /* Any RAID offload error results in retry which will use
1653 * the normal I/O path so the controller can handle whatever's
1654 * wrong.
1655 */
1656 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1657 c2->error_data.serv_response ==
1658 IOACCEL2_SERV_RESPONSE_FAILURE) {
Scott Teelc3497752014-02-18 13:56:34 -06001659 dev->offload_enabled = 0;
Scott Teele863d682014-02-18 13:57:05 -06001660 h->drv_req_rescan = 1; /* schedule controller for a rescan */
Scott Teelc3497752014-02-18 13:56:34 -06001661 cmd->result = DID_SOFT_ERROR << 16;
1662 cmd_free(h, c);
1663 cmd->scsi_done(cmd);
1664 return;
1665 }
Scott Teela09c1442014-02-18 13:57:21 -06001666 raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
1667 /* If error found, disable Smart Path, schedule a rescan,
1668 * and force a retry on the standard path.
1669 */
1670 if (raid_retry) {
1671 dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
1672 "HP SSD Smart Path");
1673 dev->offload_enabled = 0; /* Disable Smart Path */
1674 h->drv_req_rescan = 1; /* schedule controller rescan */
1675 cmd->result = DID_SOFT_ERROR << 16;
1676 }
Scott Teelc3497752014-02-18 13:56:34 -06001677 cmd_free(h, c);
1678 cmd->scsi_done(cmd);
1679}
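
/* Retry-path sketch: DID_SOFT_ERROR makes the SCSI midlayer resubmit
 * the command, and since offload_enabled was just cleared the retry is
 * built as a normal RAID-path request rather than an ioaccel2 one; the
 * scheduled rescan gives the driver a chance to re-enable ioaccel once
 * the controller reports the volume healthy again.
 */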
1680
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05001681static void complete_scsi_command(struct CommandList *cp)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001682{
1683 struct scsi_cmnd *cmd;
1684 struct ctlr_info *h;
1685 struct ErrorInfo *ei;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001686 struct hpsa_scsi_dev_t *dev;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001687
1688 unsigned char sense_key;
1689 unsigned char asc; /* additional sense code */
1690 unsigned char ascq; /* additional sense code qualifier */
Stephen M. Camerondb111e12011-06-03 09:57:34 -05001691 unsigned long sense_data_size;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001692
1693 ei = cp->err_info;
1694 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1695 h = cp->h;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001696 dev = cmd->device->hostdata;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001697
1698 scsi_dma_unmap(cmd); /* undo the DMA mappings */
Matt Gatese1f7de02014-02-18 13:55:17 -06001699 if ((cp->cmd_type == CMD_SCSI) &&
1700 (cp->Header.SGTotal > h->max_cmd_sg_entries))
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001701 hpsa_unmap_sg_chain_block(h, cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001702
1703 cmd->result = (DID_OK << 16); /* host byte */
1704 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
Scott Teelc3497752014-02-18 13:56:34 -06001705
1706 if (cp->cmd_type == CMD_IOACCEL2)
1707 return process_ioaccel2_completion(h, cp, cmd, dev);
1708
Stephen M. Cameron55126722010-02-25 14:03:01 -06001709 cmd->result |= ei->ScsiStatus;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001710
Robert Elliott6aa4c362014-07-03 10:18:19 -05001711 scsi_set_resid(cmd, ei->ResidualCnt);
1712 if (ei->CommandStatus == 0) {
1713 cmd_free(h, cp);
1714 cmd->scsi_done(cmd);
1715 return;
1716 }
1717
1718 /* copy the sense data */
Stephen M. Camerondb111e12011-06-03 09:57:34 -05001719 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1720 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1721 else
1722 sense_data_size = sizeof(ei->SenseInfo);
1723 if (ei->SenseLen < sense_data_size)
1724 sense_data_size = ei->SenseLen;
1725
1726 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001727
Matt Gatese1f7de02014-02-18 13:55:17 -06001728 /* For I/O accelerator commands, copy over some fields to the normal
1729 * CISS header used below for error handling.
1730 */
1731 if (cp->cmd_type == CMD_IOACCEL1) {
1732 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1733 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
1734 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001735 cp->Header.tag = c->tag;
Matt Gatese1f7de02014-02-18 13:55:17 -06001736 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1737 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001738
1739 /* Any RAID offload error results in retry which will use
1740 * the normal I/O path so the controller can handle whatever's
1741 * wrong.
1742 */
1743 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1744 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1745 dev->offload_enabled = 0;
1746 cmd->result = DID_SOFT_ERROR << 16;
1747 cmd_free(h, cp);
1748 cmd->scsi_done(cmd);
1749 return;
1750 }
Matt Gatese1f7de02014-02-18 13:55:17 -06001751 }
1752
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001753 /* an error has occurred */
1754 switch (ei->CommandStatus) {
1755
1756 case CMD_TARGET_STATUS:
1757 if (ei->ScsiStatus) {
1758 /* Get sense key */
1759 sense_key = 0xf & ei->SenseInfo[2];
1760 /* Get additional sense code */
1761 asc = ei->SenseInfo[12];
1762			/* Get additional sense code qualifier */
1763 ascq = ei->SenseInfo[13];
1764 }
1765
1766 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
Matt Gates3ce438d2013-12-04 17:10:36 -06001767 if (check_for_unit_attention(h, cp))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001768 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001769 if (sense_key == ILLEGAL_REQUEST) {
1770 /*
1771 * SCSI REPORT_LUNS is commonly unsupported on
1772 * Smart Array. Suppress noisy complaint.
1773 */
1774 if (cp->Request.CDB[0] == REPORT_LUNS)
1775 break;
1776
1777 /* If ASC/ASCQ indicate Logical Unit
1778 * Not Supported condition,
1779 */
1780 if ((asc == 0x25) && (ascq == 0x0)) {
1781 dev_warn(&h->pdev->dev, "cp %p "
1782 "has check condition\n", cp);
1783 break;
1784 }
1785 }
1786
1787 if (sense_key == NOT_READY) {
1788 /* If Sense is Not Ready, Logical Unit
1789 * Not ready, Manual Intervention
1790 * required
1791 */
1792 if ((asc == 0x04) && (ascq == 0x03)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001793 dev_warn(&h->pdev->dev, "cp %p "
1794 "has check condition: unit "
1795 "not ready, manual "
1796 "intervention required\n", cp);
1797 break;
1798 }
1799 }
Matt Gates1d3b3602010-02-04 08:43:00 -06001800 if (sense_key == ABORTED_COMMAND) {
1801 /* Aborted command is retryable */
1802 dev_warn(&h->pdev->dev, "cp %p "
1803 "has check condition: aborted command: "
1804 "ASC: 0x%x, ASCQ: 0x%x\n",
1805 cp, asc, ascq);
Stephen M. Cameron2e311fb2013-09-23 13:33:41 -05001806 cmd->result |= DID_SOFT_ERROR << 16;
Matt Gates1d3b3602010-02-04 08:43:00 -06001807 break;
1808 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001809 /* Must be some other type of check condition */
Stephen M. Cameron21b8e4e2012-05-01 11:42:25 -05001810 dev_dbg(&h->pdev->dev, "cp %p has check condition: "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001811 "unknown type: "
1812 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1813 "Returning result: 0x%x, "
1814 "cmd=[%02x %02x %02x %02x %02x "
Mike Miller807be732010-02-04 08:43:26 -06001815 "%02x %02x %02x %02x %02x %02x "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001816 "%02x %02x %02x %02x %02x]\n",
1817 cp, sense_key, asc, ascq,
1818 cmd->result,
1819 cmd->cmnd[0], cmd->cmnd[1],
1820 cmd->cmnd[2], cmd->cmnd[3],
1821 cmd->cmnd[4], cmd->cmnd[5],
1822 cmd->cmnd[6], cmd->cmnd[7],
Mike Miller807be732010-02-04 08:43:26 -06001823 cmd->cmnd[8], cmd->cmnd[9],
1824 cmd->cmnd[10], cmd->cmnd[11],
1825 cmd->cmnd[12], cmd->cmnd[13],
1826 cmd->cmnd[14], cmd->cmnd[15]);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001827 break;
1828 }
1829
1830
1831 /* Problem was not a check condition
1832 * Pass it up to the upper layers...
1833 */
1834 if (ei->ScsiStatus) {
1835 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1836 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1837 "Returning result: 0x%x\n",
1838 cp, ei->ScsiStatus,
1839 sense_key, asc, ascq,
1840 cmd->result);
1841 } else { /* scsi status is zero??? How??? */
1842 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1843 "Returning no connection.\n", cp),
1844
1845 /* Ordinarily, this case should never happen,
1846 * but there is a bug in some released firmware
1847 * revisions that allows it to happen if, for
1848 * example, a 4100 backplane loses power and
1849 * the tape drive is in it. We assume that
1850 * it's a fatal error of some kind because we
1851 * can't show that it wasn't. We will make it
1852 * look like selection timeout since that is
1853 * the most common reason for this to occur,
1854 * and it's severe enough.
1855 */
1856
1857 cmd->result = DID_NO_CONNECT << 16;
1858 }
1859 break;
1860
1861 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1862 break;
1863 case CMD_DATA_OVERRUN:
1864 dev_warn(&h->pdev->dev, "cp %p has"
1865 " completed with data overrun "
1866 "reported\n", cp);
1867 break;
1868 case CMD_INVALID: {
1869 /* print_bytes(cp, sizeof(*cp), 1, 0);
1870 print_cmd(cp); */
1871 /* We get CMD_INVALID if you address a non-existent device
1872 * instead of a selection timeout (no response). You will
1873 * see this if you yank out a drive, then try to access it.
1874 * This is kind of a shame because it means that any other
1875 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1876 * missing target. */
1877 cmd->result = DID_NO_CONNECT << 16;
1878 }
1879 break;
1880 case CMD_PROTOCOL_ERR:
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05001881 cmd->result = DID_ERROR << 16;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001882 dev_warn(&h->pdev->dev, "cp %p has "
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05001883 "protocol error\n", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001884 break;
1885 case CMD_HARDWARE_ERR:
1886 cmd->result = DID_ERROR << 16;
1887 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1888 break;
1889 case CMD_CONNECTION_LOST:
1890 cmd->result = DID_ERROR << 16;
1891 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1892 break;
1893 case CMD_ABORTED:
1894 cmd->result = DID_ABORT << 16;
1895 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1896 cp, ei->ScsiStatus);
1897 break;
1898 case CMD_ABORT_FAILED:
1899 cmd->result = DID_ERROR << 16;
1900 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1901 break;
1902 case CMD_UNSOLICITED_ABORT:
Stephen M. Cameronf6e76052011-07-26 11:08:52 -05001903 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1904 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001905 "abort\n", cp);
1906 break;
1907 case CMD_TIMEOUT:
1908 cmd->result = DID_TIME_OUT << 16;
1909		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1910 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06001911 case CMD_UNABORTABLE:
1912 cmd->result = DID_ERROR << 16;
1913 dev_warn(&h->pdev->dev, "Command unabortable\n");
1914 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001915 case CMD_IOACCEL_DISABLED:
1916 /* This only handles the direct pass-through case since RAID
1917 * offload is handled above. Just attempt a retry.
1918 */
1919 cmd->result = DID_SOFT_ERROR << 16;
1920 dev_warn(&h->pdev->dev,
1921 "cp %p had HP SSD Smart Path error\n", cp);
1922 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001923 default:
1924 cmd->result = DID_ERROR << 16;
1925 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1926 cp, ei->CommandStatus);
1927 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001928 cmd_free(h, cp);
Tomas Henzl2cc5bfa2013-08-01 15:14:00 +02001929 cmd->scsi_done(cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001930}
1931
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001932static void hpsa_pci_unmap(struct pci_dev *pdev,
1933 struct CommandList *c, int sg_used, int data_direction)
1934{
1935 int i;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001936
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001937 for (i = 0; i < sg_used; i++)
1938 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
1939 le32_to_cpu(c->SG[i].Len),
1940 data_direction);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001941}
1942
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001943static int hpsa_map_one(struct pci_dev *pdev,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001944 struct CommandList *cp,
1945 unsigned char *buf,
1946 size_t buflen,
1947 int data_direction)
1948{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001949 u64 addr64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001950
1951 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1952 cp->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001953 cp->Header.SGTotal = cpu_to_le16(0);
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001954 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001955 }
1956
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001957 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
Shuah Khaneceaae12013-02-20 11:24:34 -06001958 if (dma_mapping_error(&pdev->dev, addr64)) {
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001959 /* Prevent subsequent unmap of something never mapped */
Shuah Khaneceaae12013-02-20 11:24:34 -06001960 cp->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001961 cp->Header.SGTotal = cpu_to_le16(0);
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001962 return -1;
Shuah Khaneceaae12013-02-20 11:24:34 -06001963 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06001964 cp->SG[0].Addr = cpu_to_le64(addr64);
1965 cp->SG[0].Len = cpu_to_le32(buflen);
1966 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
1967 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
1968 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001969 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001970}
1971
1972static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1973 struct CommandList *c)
1974{
1975 DECLARE_COMPLETION_ONSTACK(wait);
1976
1977 c->waiting = &wait;
1978 enqueue_cmd_and_start_io(h, c);
1979 wait_for_completion(&wait);
1980}
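
/* Completion idiom note: the on-stack completion is safe because the
 * submitting thread sleeps in wait_for_completion() until the command's
 * completion path calls complete(c->waiting), so the stack frame is
 * guaranteed to outlive the I/O.
 */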
1981
Stephen M. Cameron094963d2014-05-29 10:53:18 -05001982static u32 lockup_detected(struct ctlr_info *h)
1983{
1984 int cpu;
1985 u32 rc, *lockup_detected;
1986
1987 cpu = get_cpu();
1988 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
1989 rc = *lockup_detected;
1990 put_cpu();
1991 return rc;
1992}
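
/* Lockless-read sketch: the hot path just reads this CPU's copy of the
 * flag, which works on the assumption that whoever detects a lockup
 * writes every CPU's per-cpu copy (e.g. via for_each_online_cpu()), so
 * any single copy is authoritative.
 */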
1993
Stephen M. Camerona0c12412011-10-26 16:22:04 -05001994static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
1995 struct CommandList *c)
1996{
Stephen M. Camerona0c12412011-10-26 16:22:04 -05001997 /* If controller lockup detected, fake a hardware error. */
Stephen M. Cameron094963d2014-05-29 10:53:18 -05001998 if (unlikely(lockup_detected(h)))
Stephen M. Camerona0c12412011-10-26 16:22:04 -05001999 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05002000 else
Stephen M. Camerona0c12412011-10-26 16:22:04 -05002001 hpsa_scsi_do_simple_cmd_core(h, c);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05002002}
2003
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002004#define MAX_DRIVER_CMD_RETRIES 25
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002005static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2006 struct CommandList *c, int data_direction)
2007{
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002008 int backoff_time = 10, retry_count = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002009
2010 do {
Joe Perches7630abd2011-05-08 23:32:40 -07002011 memset(c->err_info, 0, sizeof(*c->err_info));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002012 hpsa_scsi_do_simple_cmd_core(h, c);
2013 retry_count++;
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002014 if (retry_count > 3) {
2015 msleep(backoff_time);
2016 if (backoff_time < 1000)
2017 backoff_time *= 2;
2018 }
Matt Bondurant852af202012-05-01 11:42:35 -05002019 } while ((check_for_unit_attention(h, c) ||
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002020 check_for_busy(h, c)) &&
2021 retry_count <= MAX_DRIVER_CMD_RETRIES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002022 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2023}
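
/* Backoff arithmetic (from the loop above): attempts 1-3 retry
 * immediately; from the 4th attempt on, the sleep starts at 10 ms and
 * doubles after each try (10, 20, ..., 640, 1280 ms), holding at
 * 1280 ms once it is no longer below 1000 ms, for at most
 * MAX_DRIVER_CMD_RETRIES attempts.
 */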
2024
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002025static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2026 struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002027{
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002028 const u8 *cdb = c->Request.CDB;
2029 const u8 *lun = c->Header.LUN.LunAddrBytes;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002030
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002031 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2032 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2033 txt, lun[0], lun[1], lun[2], lun[3],
2034 lun[4], lun[5], lun[6], lun[7],
2035 cdb[0], cdb[1], cdb[2], cdb[3],
2036 cdb[4], cdb[5], cdb[6], cdb[7],
2037 cdb[8], cdb[9], cdb[10], cdb[11],
2038 cdb[12], cdb[13], cdb[14], cdb[15]);
2039}
2040
2041static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2042 struct CommandList *cp)
2043{
2044 const struct ErrorInfo *ei = cp->err_info;
2045 struct device *d = &cp->h->pdev->dev;
2046 const u8 *sd = ei->SenseInfo;
2047
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002048 switch (ei->CommandStatus) {
2049 case CMD_TARGET_STATUS:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002050 hpsa_print_cmd(h, "SCSI status", cp);
2051 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2052 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2053 sd[2] & 0x0f, sd[12], sd[13]);
2054 else
2055 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002056 if (ei->ScsiStatus == 0)
2057 dev_warn(d, "SCSI status is abnormally zero. "
2058 "(probably indicates selection timeout "
2059 "reported incorrectly due to a known "
2060 "firmware bug, circa July, 2001.)\n");
2061 break;
2062 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002063 break;
2064 case CMD_DATA_OVERRUN:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002065 hpsa_print_cmd(h, "overrun condition", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002066 break;
2067 case CMD_INVALID: {
2068		/* controller unfortunately reports SCSI pass-throughs
2069 * to non-existent targets as invalid commands.
2070 */
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002071 hpsa_print_cmd(h, "invalid command", cp);
2072 dev_warn(d, "probably means device no longer present\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002073 }
2074 break;
2075 case CMD_PROTOCOL_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002076 hpsa_print_cmd(h, "protocol error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002077 break;
2078 case CMD_HARDWARE_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002079 hpsa_print_cmd(h, "hardware error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002080 break;
2081 case CMD_CONNECTION_LOST:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002082 hpsa_print_cmd(h, "connection lost", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002083 break;
2084 case CMD_ABORTED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002085 hpsa_print_cmd(h, "aborted", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002086 break;
2087 case CMD_ABORT_FAILED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002088 hpsa_print_cmd(h, "abort failed", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002089 break;
2090 case CMD_UNSOLICITED_ABORT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002091 hpsa_print_cmd(h, "unsolicited abort", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002092 break;
2093 case CMD_TIMEOUT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002094 hpsa_print_cmd(h, "timed out", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002095 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002096 case CMD_UNABORTABLE:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002097 hpsa_print_cmd(h, "unabortable", cp);
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002098 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002099 default:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002100 hpsa_print_cmd(h, "unknown status", cp);
2101 dev_warn(d, "Unknown command status %x\n",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002102 ei->CommandStatus);
2103 }
2104}
2105
2106static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002107 u16 page, unsigned char *buf,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002108 unsigned char bufsize)
2109{
2110 int rc = IO_OK;
2111 struct CommandList *c;
2112 struct ErrorInfo *ei;
2113
2114 c = cmd_special_alloc(h);
2115
2116 if (c == NULL) { /* trouble... */
2117 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06002118 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002119 }
2120
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002121 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2122 page, scsi3addr, TYPE_CMD)) {
2123 rc = -1;
2124 goto out;
2125 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002126 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2127 ei = c->err_info;
2128 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002129 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002130 rc = -1;
2131 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002132out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002133 cmd_special_free(h, c);
2134 return rc;
2135}
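
/* Usage sketch (mirrors the callers below): plain INQUIRY data is
 * fetched with page 0, while VPD pages are requested by OR-ing VPD_PAGE
 * into the page code, e.g.
 *
 *	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
 *
 * asks for the Device Identification VPD page that
 * hpsa_get_device_id() parses further down.
 */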
2136
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002137static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2138 unsigned char *scsi3addr, unsigned char page,
2139 struct bmic_controller_parameters *buf, size_t bufsize)
2140{
2141 int rc = IO_OK;
2142 struct CommandList *c;
2143 struct ErrorInfo *ei;
2144
2145 c = cmd_special_alloc(h);
2146
2147 if (c == NULL) { /* trouble... */
2148 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2149 return -ENOMEM;
2150 }
2151
2152 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2153 page, scsi3addr, TYPE_CMD)) {
2154 rc = -1;
2155 goto out;
2156 }
2157 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2158 ei = c->err_info;
2159 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2160 hpsa_scsi_interpret_error(h, c);
2161 rc = -1;
2162 }
2163out:
2164 cmd_special_free(h, c);
2165 return rc;
2166}
2167
Scott Teelbf711ac2014-02-18 13:56:39 -06002168static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2169 u8 reset_type)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002170{
2171 int rc = IO_OK;
2172 struct CommandList *c;
2173 struct ErrorInfo *ei;
2174
2175 c = cmd_special_alloc(h);
2176
2177 if (c == NULL) { /* trouble... */
2178 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
Stephen M. Camerone9ea04a2010-02-25 14:03:06 -06002179 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002180 }
2181
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002182 /* fill_cmd can't fail here, no data buffer to map. */
Scott Teelbf711ac2014-02-18 13:56:39 -06002183 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2184 scsi3addr, TYPE_MSG);
2185 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002186 hpsa_scsi_do_simple_cmd_core(h, c);
2187 /* no unmap needed here because no data xfer. */
2188
2189 ei = c->err_info;
2190 if (ei->CommandStatus != 0) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002191 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002192 rc = -1;
2193 }
2194 cmd_special_free(h, c);
2195 return rc;
2196}
2197
2198static void hpsa_get_raid_level(struct ctlr_info *h,
2199 unsigned char *scsi3addr, unsigned char *raid_level)
2200{
2201 int rc;
2202 unsigned char *buf;
2203
2204 *raid_level = RAID_UNKNOWN;
2205 buf = kzalloc(64, GFP_KERNEL);
2206 if (!buf)
2207 return;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002208 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002209 if (rc == 0)
2210 *raid_level = buf[8];
2211 if (*raid_level > RAID_UNKNOWN)
2212 *raid_level = RAID_UNKNOWN;
2213 kfree(buf);
2214 return;
2215}
2216
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002217#define HPSA_MAP_DEBUG
2218#ifdef HPSA_MAP_DEBUG
2219static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2220 struct raid_map_data *map_buff)
2221{
2222 struct raid_map_disk_data *dd = &map_buff->data[0];
2223 int map, row, col;
2224 u16 map_cnt, row_cnt, disks_per_row;
2225
2226 if (rc != 0)
2227 return;
2228
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06002229 /* Show details only if debugging has been activated. */
2230 if (h->raid_offload_debug < 2)
2231 return;
2232
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002233 dev_info(&h->pdev->dev, "structure_size = %u\n",
2234 le32_to_cpu(map_buff->structure_size));
2235 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2236 le32_to_cpu(map_buff->volume_blk_size));
2237 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2238 le64_to_cpu(map_buff->volume_blk_cnt));
2239 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2240 map_buff->phys_blk_shift);
2241 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2242 map_buff->parity_rotation_shift);
2243 dev_info(&h->pdev->dev, "strip_size = %u\n",
2244 le16_to_cpu(map_buff->strip_size));
2245 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2246 le64_to_cpu(map_buff->disk_starting_blk));
2247 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2248 le64_to_cpu(map_buff->disk_blk_cnt));
2249 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2250 le16_to_cpu(map_buff->data_disks_per_row));
2251 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2252 le16_to_cpu(map_buff->metadata_disks_per_row));
2253 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2254 le16_to_cpu(map_buff->row_cnt));
2255 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2256 le16_to_cpu(map_buff->layout_map_count));
Scott Teeldd0e19f2014-02-18 13:57:31 -06002257 dev_info(&h->pdev->dev, "flags = %u\n",
2258 le16_to_cpu(map_buff->flags));
2259 if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
2260		dev_info(&h->pdev->dev, "encryption = ON\n");
2261 else
2262		dev_info(&h->pdev->dev, "encryption = OFF\n");
2263 dev_info(&h->pdev->dev, "dekindex = %u\n",
2264 le16_to_cpu(map_buff->dekindex));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002265
2266 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2267 for (map = 0; map < map_cnt; map++) {
2268 dev_info(&h->pdev->dev, "Map%u:\n", map);
2269 row_cnt = le16_to_cpu(map_buff->row_cnt);
2270 for (row = 0; row < row_cnt; row++) {
2271 dev_info(&h->pdev->dev, " Row%u:\n", row);
2272 disks_per_row =
2273 le16_to_cpu(map_buff->data_disks_per_row);
2274 for (col = 0; col < disks_per_row; col++, dd++)
2275 dev_info(&h->pdev->dev,
2276 " D%02u: h=0x%04x xor=%u,%u\n",
2277 col, dd->ioaccel_handle,
2278 dd->xor_mult[0], dd->xor_mult[1]);
2279 disks_per_row =
2280 le16_to_cpu(map_buff->metadata_disks_per_row);
2281 for (col = 0; col < disks_per_row; col++, dd++)
2282 dev_info(&h->pdev->dev,
2283 " M%02u: h=0x%04x xor=%u,%u\n",
2284 col, dd->ioaccel_handle,
2285 dd->xor_mult[0], dd->xor_mult[1]);
2286 }
2287 }
2288}
2289#else
2290static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2291 __attribute__((unused)) int rc,
2292 __attribute__((unused)) struct raid_map_data *map_buff)
2293{
2294}
2295#endif
2296
2297static int hpsa_get_raid_map(struct ctlr_info *h,
2298 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2299{
2300 int rc = 0;
2301 struct CommandList *c;
2302 struct ErrorInfo *ei;
2303
2304 c = cmd_special_alloc(h);
2305 if (c == NULL) {
2306 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2307 return -ENOMEM;
2308 }
2309 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2310 sizeof(this_device->raid_map), 0,
2311 scsi3addr, TYPE_CMD)) {
2312 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2313 cmd_special_free(h, c);
2314 return -ENOMEM;
2315 }
2316 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2317 ei = c->err_info;
2318 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002319 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002320 cmd_special_free(h, c);
2321 return -1;
2322 }
2323 cmd_special_free(h, c);
2324
2325 /* @todo in the future, dynamically allocate RAID map memory */
2326 if (le32_to_cpu(this_device->raid_map.structure_size) >
2327 sizeof(this_device->raid_map)) {
2328 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2329 rc = -1;
2330 }
2331 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2332 return rc;
2333}
2334
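/* Probe pattern used below: VPD page 0x00 (supported pages) is read
 * twice -- once with a header-sized buffer just to learn the list
 * length from buf[3], then again with a buffer large enough for the
 * whole list -- before the list is scanned for the page of interest.
 */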
Stephen M. Cameron1b70150a2014-02-18 13:57:16 -06002335static int hpsa_vpd_page_supported(struct ctlr_info *h,
2336 unsigned char scsi3addr[], u8 page)
2337{
2338 int rc;
2339 int i;
2340 int pages;
2341 unsigned char *buf, bufsize;
2342
2343 buf = kzalloc(256, GFP_KERNEL);
2344 if (!buf)
2345 return 0;
2346
2347 /* Get the size of the page list first */
2348 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2349 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2350 buf, HPSA_VPD_HEADER_SZ);
2351 if (rc != 0)
2352 goto exit_unsupported;
2353 pages = buf[3];
2354 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2355 bufsize = pages + HPSA_VPD_HEADER_SZ;
2356 else
2357 bufsize = 255;
2358
2359 /* Get the whole VPD page list */
2360 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2361 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2362 buf, bufsize);
2363 if (rc != 0)
2364 goto exit_unsupported;
2365
2366 pages = buf[3];
2367 for (i = 1; i <= pages; i++)
2368 if (buf[3 + i] == page)
2369 goto exit_supported;
2370exit_unsupported:
2371 kfree(buf);
2372 return 0;
2373exit_supported:
2374 kfree(buf);
2375 return 1;
2376}
2377
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002378static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2379 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2380{
2381 int rc;
2382 unsigned char *buf;
2383 u8 ioaccel_status;
2384
2385 this_device->offload_config = 0;
2386 this_device->offload_enabled = 0;
2387
2388 buf = kzalloc(64, GFP_KERNEL);
2389 if (!buf)
2390 return;
Stephen M. Cameron1b70150a2014-02-18 13:57:16 -06002391 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2392 goto out;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002393 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002394 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002395 if (rc != 0)
2396 goto out;
2397
2398#define IOACCEL_STATUS_BYTE 4
2399#define OFFLOAD_CONFIGURED_BIT 0x01
2400#define OFFLOAD_ENABLED_BIT 0x02
2401 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2402 this_device->offload_config =
2403 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2404 if (this_device->offload_config) {
2405 this_device->offload_enabled =
2406 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2407 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2408 this_device->offload_enabled = 0;
2409 }
2410out:
2411 kfree(buf);
2412 return;
2413}
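
/* Decode example (illustrative): ioaccel_status = 0x03 means the volume
 * is both configured (bit 0) and enabled (bit 1) for HP SSD Smart Path;
 * even then, offload_enabled drops back to 0 if the RAID map cannot be
 * fetched, because the driver needs the map to build ioaccel requests.
 */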
2414
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002415/* Get the device id from inquiry page 0x83 */
2416static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2417 unsigned char *device_id, int buflen)
2418{
2419 int rc;
2420 unsigned char *buf;
2421
2422 if (buflen > 16)
2423 buflen = 16;
2424 buf = kzalloc(64, GFP_KERNEL);
2425 if (!buf)
Stephen M. Camerona84d7942014-05-29 10:54:20 -05002426 return -ENOMEM;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002427 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002428 if (rc == 0)
2429 memcpy(device_id, &buf[8], buflen);
2430 kfree(buf);
2431 return rc != 0;
2432}
2433
2434static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2435 struct ReportLUNdata *buf, int bufsize,
2436 int extended_response)
2437{
2438 int rc = IO_OK;
2439 struct CommandList *c;
2440 unsigned char scsi3addr[8];
2441 struct ErrorInfo *ei;
2442
2443 c = cmd_special_alloc(h);
2444 if (c == NULL) { /* trouble... */
2445 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2446 return -1;
2447 }
Stephen M. Camerone89c0ae2010-02-04 08:42:04 -06002448 /* address the controller */
2449 memset(scsi3addr, 0, sizeof(scsi3addr));
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002450 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2451 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2452 rc = -1;
2453 goto out;
2454 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002455 if (extended_response)
2456 c->Request.CDB[1] = extended_response;
2457 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2458 ei = c->err_info;
2459 if (ei->CommandStatus != 0 &&
2460 ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002461 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002462 rc = -1;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002463 } else {
2464 if (buf->extended_response_flag != extended_response) {
2465 dev_err(&h->pdev->dev,
2466 "report luns requested format %u, got %u\n",
2467 extended_response,
2468 buf->extended_response_flag);
2469 rc = -1;
2470 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002471 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002472out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002473 cmd_special_free(h, c);
2474 return rc;
2475}
2476
2477static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2478 struct ReportLUNdata *buf,
2479 int bufsize, int extended_response)
2480{
2481 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
2482}
2483
2484static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2485 struct ReportLUNdata *buf, int bufsize)
2486{
2487 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2488}
2489
2490static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2491 int bus, int target, int lun)
2492{
2493 device->bus = bus;
2494 device->target = target;
2495 device->lun = lun;
2496}
2497
Stephen M. Cameron98465902014-02-21 16:25:00 -06002498/* Use VPD inquiry to get details of volume status */
2499static int hpsa_get_volume_status(struct ctlr_info *h,
2500 unsigned char scsi3addr[])
2501{
2502 int rc;
2503 int status;
2504 int size;
2505 unsigned char *buf;
2506
2507 buf = kzalloc(64, GFP_KERNEL);
2508 if (!buf)
2509 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2510
2511 /* Does controller have VPD for logical volume status? */
Stephen M. Cameron24a4b072014-05-29 10:54:10 -05002512 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
Stephen M. Cameron98465902014-02-21 16:25:00 -06002513 goto exit_failed;
Stephen M. Cameron98465902014-02-21 16:25:00 -06002514
2515 /* Get the size of the VPD return buffer */
2516 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2517 buf, HPSA_VPD_HEADER_SZ);
Stephen M. Cameron24a4b072014-05-29 10:54:10 -05002518 if (rc != 0)
Stephen M. Cameron98465902014-02-21 16:25:00 -06002519 goto exit_failed;
Stephen M. Cameron98465902014-02-21 16:25:00 -06002520 size = buf[3];
2521
2522 /* Now get the whole VPD buffer */
2523 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2524 buf, size + HPSA_VPD_HEADER_SZ);
Stephen M. Cameron24a4b072014-05-29 10:54:10 -05002525 if (rc != 0)
Stephen M. Cameron98465902014-02-21 16:25:00 -06002526 goto exit_failed;
Stephen M. Cameron98465902014-02-21 16:25:00 -06002527 status = buf[4]; /* status byte */
2528
2529 kfree(buf);
2530 return status;
2531exit_failed:
2532 kfree(buf);
2533 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2534}
2535
2536/* Determine offline status of a volume.
2537 * Return either:
2538 * 0 (not offline)
Stephen M. Cameron67955ba2014-05-29 10:54:25 -05002539 * 0xff (offline for unknown reasons)
Stephen M. Cameron98465902014-02-21 16:25:00 -06002540 * # (integer code indicating one of several NOT READY states
2541 * describing why a volume is to be kept offline)
2542 */
Stephen M. Cameron67955ba2014-05-29 10:54:25 -05002543static int hpsa_volume_offline(struct ctlr_info *h,
Stephen M. Cameron98465902014-02-21 16:25:00 -06002544 unsigned char scsi3addr[])
2545{
2546 struct CommandList *c;
2547 unsigned char *sense, sense_key, asc, ascq;
2548 int ldstat = 0;
2549 u16 cmd_status;
2550 u8 scsi_status;
2551#define ASC_LUN_NOT_READY 0x04
2552#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2553#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2554
2555 c = cmd_alloc(h);
2556 if (!c)
2557 return 0;
2558 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2559 hpsa_scsi_do_simple_cmd_core(h, c);
2560 sense = c->err_info->SenseInfo;
2561 sense_key = sense[2];
2562 asc = sense[12];
2563 ascq = sense[13];
2564 cmd_status = c->err_info->CommandStatus;
2565 scsi_status = c->err_info->ScsiStatus;
2566 cmd_free(h, c);
2567 /* Is the volume 'not ready'? */
2568 if (cmd_status != CMD_TARGET_STATUS ||
2569 scsi_status != SAM_STAT_CHECK_CONDITION ||
2570 sense_key != NOT_READY ||
2571 asc != ASC_LUN_NOT_READY) {
2572 return 0;
2573 }
2574
2575 /* Determine the reason for not ready state */
2576 ldstat = hpsa_get_volume_status(h, scsi3addr);
2577
2578 /* Keep volume offline in certain cases: */
2579 switch (ldstat) {
2580 case HPSA_LV_UNDERGOING_ERASE:
2581 case HPSA_LV_UNDERGOING_RPI:
2582 case HPSA_LV_PENDING_RPI:
2583 case HPSA_LV_ENCRYPTED_NO_KEY:
2584 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2585 case HPSA_LV_UNDERGOING_ENCRYPTION:
2586 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2587 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2588 return ldstat;
2589 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2590 /* If VPD status page isn't available,
2591 * use ASC/ASCQ to determine state
2592 */
2593 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2594 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2595 return ldstat;
2596 break;
2597 default:
2598 break;
2599 }
2600 return 0;
2601}
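
/* Sense decode example (illustrative): a TEST UNIT READY failing with
 * sense key NOT_READY (0x2), ASC 0x04, ASCQ 0x04 means "format in
 * progress"; on controllers lacking the VPD status page, that ASCQ
 * alone keeps the volume offline via the fallback case above.
 */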
2602
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002603static int hpsa_update_device_info(struct ctlr_info *h,
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002604 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2605 unsigned char *is_OBDR_device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002606{
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002607
2608#define OBDR_SIG_OFFSET 43
2609#define OBDR_TAPE_SIG "$DR-10"
2610#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2611#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2612
Stephen M. Cameronea6d3bc2010-02-04 08:42:09 -06002613 unsigned char *inq_buff;
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002614 unsigned char *obdr_sig;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002615
Stephen M. Cameronea6d3bc2010-02-04 08:42:09 -06002616 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002617 if (!inq_buff)
2618 goto bail_out;
2619
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002620 /* Do an inquiry to the device to see what it is. */
2621 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2622 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2623 /* Inquiry failed (msg printed already) */
2624 dev_err(&h->pdev->dev,
2625 "hpsa_update_device_info: inquiry failed\n");
2626 goto bail_out;
2627 }
2628
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002629 this_device->devtype = (inq_buff[0] & 0x1f);
2630 memcpy(this_device->scsi3addr, scsi3addr, 8);
2631 memcpy(this_device->vendor, &inq_buff[8],
2632 sizeof(this_device->vendor));
2633 memcpy(this_device->model, &inq_buff[16],
2634 sizeof(this_device->model));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002635 memset(this_device->device_id, 0,
2636 sizeof(this_device->device_id));
2637 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2638 sizeof(this_device->device_id));
2639
2640 if (this_device->devtype == TYPE_DISK &&
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002641 is_logical_dev_addr_mode(scsi3addr)) {
Stephen M. Cameron67955ba2014-05-29 10:54:25 -05002642 int volume_offline;
2643
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002644 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002645 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2646 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
Stephen M. Cameron67955ba2014-05-29 10:54:25 -05002647 volume_offline = hpsa_volume_offline(h, scsi3addr);
2648 if (volume_offline < 0 || volume_offline > 0xff)
2649 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2650 this_device->volume_offline = volume_offline & 0xff;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002651 } else {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002652 this_device->raid_level = RAID_UNKNOWN;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002653 this_device->offload_config = 0;
2654 this_device->offload_enabled = 0;
Stephen M. Cameron98465902014-02-21 16:25:00 -06002655 this_device->volume_offline = 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002656 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002657
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002658 if (is_OBDR_device) {
2659 /* See if this is a One-Button-Disaster-Recovery device
2660 * by looking for "$DR-10" at offset 43 in inquiry data.
2661 */
2662 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2663 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2664 strncmp(obdr_sig, OBDR_TAPE_SIG,
2665 OBDR_SIG_LEN) == 0);
2666 }
2667
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002668 kfree(inq_buff);
2669 return 0;
2670
2671bail_out:
2672 kfree(inq_buff);
2673 return 1;
2674}
2675
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002676static unsigned char *ext_target_model[] = {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002677 "MSA2012",
2678 "MSA2024",
2679 "MSA2312",
2680 "MSA2324",
Stephen M. Cameronfda38512011-05-03 15:00:07 -05002681 "P2000 G3 SAS",
Stephen M. Camerone06c8e52013-09-23 13:33:56 -05002682 "MSA 2040 SAS",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002683 NULL,
2684};
2685
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002686static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002687{
2688 int i;
2689
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002690 for (i = 0; ext_target_model[i]; i++)
2691 if (strncmp(device->model, ext_target_model[i],
2692 strlen(ext_target_model[i])) == 0)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002693 return 1;
2694 return 0;
2695}
2696
2697/* Helper function to assign bus, target, lun mapping of devices.
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002698 * Puts non-external target logical volumes on bus 0, external target logical
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002699 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
2700 * Logical drive target and lun are assigned at this time, but
2701 * physical device lun and target assignment are deferred (assigned
2702 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2703 */
2704static void figure_bus_target_lun(struct ctlr_info *h,
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002705 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002706{
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002707 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002708
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002709 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2710 /* physical device, target and lun filled in later */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002711 if (is_hba_lunid(lunaddrbytes))
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002712 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002713 else
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002714 /* defer target, lun assignment for physical devices */
2715 hpsa_set_bus_target_lun(device, 2, -1, -1);
2716 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002717 }
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002718 /* It's a logical device */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002719 if (is_ext_target(h, device)) {
2720 /* External target: put logicals on bus 1 and match the
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002721 * target/lun numbers the box reports; for other smart arrays,
2722 * use bus 0, target 0, and match the lunid.
2723 */
2724 hpsa_set_bus_target_lun(device,
2725 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2726 return;
2727 }
2728 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002729}
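/*
 * Illustrative sketch (compiled out) of the lunid decode performed
 * above, assuming the same 32-bit little-endian lunid layout the
 * driver uses.  For example, an external-target lunid of 0x00010005
 * decodes to bus 1, target 1, lun 5, while a local logical volume with
 * lunid 0x00000007 decodes to bus 0, target 0, lun 7.  The function
 * name is hypothetical.
 */
#if 0
static void example_decode_logical_lunid(u32 lunid, int is_external,
					 int *bus, int *target, int *lun)
{
	if (is_external) {	/* MSA2xxx-style external target box */
		*bus = 1;
		*target = (lunid >> 16) & 0x3fff;
		*lun = lunid & 0x00ff;
	} else {		/* local smart array logical volume */
		*bus = 0;
		*target = 0;
		*lun = lunid & 0x3fff;
	}
}
#endif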
2730
2731/*
2732 * If there is no lun 0 on a target, linux won't find any devices.
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002733 * For the external targets (arrays), we have to manually detect the enclosure
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002734 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2735 * it for some reason. *tmpdevice is the target we're adding,
2736 * this_device is a pointer into the current element of currentsd[]
2737 * that we're building up in update_scsi_devices(), below.
2738 * lunzerobits is a bitmap that tracks which targets already have a
2739 * lun 0 assigned.
2740 * Returns 1 if an enclosure was added, 0 if not.
2741 */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002742static int add_ext_target_dev(struct ctlr_info *h,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002743 struct hpsa_scsi_dev_t *tmpdevice,
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002744 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002745 unsigned long lunzerobits[], int *n_ext_target_devs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002746{
2747 unsigned char scsi3addr[8];
2748
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002749 if (test_bit(tmpdevice->target, lunzerobits))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002750 return 0; /* There is already a lun 0 on this target. */
2751
2752 if (!is_logical_dev_addr_mode(lunaddrbytes))
2753 return 0; /* It's the logical targets that may lack lun 0. */
2754
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002755 if (!is_ext_target(h, tmpdevice))
2756 return 0; /* Only external target devices have this problem. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002757
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002758 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002759 return 0;
2760
Stephen M. Cameronc4f8a292011-01-07 10:55:43 -06002761 memset(scsi3addr, 0, 8);
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002762 scsi3addr[3] = tmpdevice->target;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002763 if (is_hba_lunid(scsi3addr))
2764 return 0; /* Don't add the RAID controller here. */
2765
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002766 if (is_scsi_rev_5(h))
2767 return 0; /* p1210m doesn't need to do this. */
2768
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002769 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
Scott Teelaca4a522012-01-19 14:01:19 -06002770 dev_warn(&h->pdev->dev, "Maximum number of external "
2771 "target devices exceeded. Check your hardware "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002772 "configuration.");
2773 return 0;
2774 }
2775
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002776 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002777 return 0;
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002778 (*n_ext_target_devs)++;
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002779 hpsa_set_bus_target_lun(this_device,
2780 tmpdevice->bus, tmpdevice->target, 0);
2781 set_bit(tmpdevice->target, lunzerobits);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002782 return 1;
2783}
2784
2785/*
Scott Teel54b6e9e2014-02-18 13:56:45 -06002786 * Get address of physical disk used for an ioaccel2 mode command:
2787 * 1. Extract ioaccel2 handle from the command.
2788 * 2. Find a matching ioaccel2 handle from list of physical disks.
2789 * 3. Return:
2790 * 1 and set scsi3addr to the address of the matching physical disk, or
2791 * 0 if no matching physical disk was found.
2792 */
2793static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2794 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2795{
2796 struct ReportExtendedLUNdata *physicals = NULL;
2797 int responsesize = 24; /* size of physical extended response */
2798 int extended = 2; /* flag forces reporting 'other dev info'. */
2799 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2800 u32 nphysicals = 0; /* number of reported physical devs */
2801 int found = 0; /* found match (1) or not (0) */
2802 u32 find; /* handle we need to match */
2803 int i;
2804 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2805 struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2806 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2807 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2808 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2809
2810 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2811 return 0; /* no match */
2812
2813 /* point to the ioaccel2 device handle */
2814 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2815 if (c2a == NULL)
2816 return 0; /* no match */
2817
2818 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2819 if (scmd == NULL)
2820 return 0; /* no match */
2821
2822 d = scmd->device->hostdata;
2823 if (d == NULL)
2824 return 0; /* no match */
2825
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06002826 it_nexus = cpu_to_le32(d->ioaccel_handle);
2827 scsi_nexus = cpu_to_le32(c2a->scsi_nexus);
Scott Teel54b6e9e2014-02-18 13:56:45 -06002828 find = c2a->scsi_nexus;
2829
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06002830 if (h->raid_offload_debug > 0)
2831 dev_info(&h->pdev->dev,
2832 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2833 __func__, scsi_nexus,
2834 d->device_id[0], d->device_id[1], d->device_id[2],
2835 d->device_id[3], d->device_id[4], d->device_id[5],
2836 d->device_id[6], d->device_id[7], d->device_id[8],
2837 d->device_id[9], d->device_id[10], d->device_id[11],
2838 d->device_id[12], d->device_id[13], d->device_id[14],
2839 d->device_id[15]);
2840
Scott Teel54b6e9e2014-02-18 13:56:45 -06002841 /* Get the list of physical devices */
2842 physicals = kzalloc(reportsize, GFP_KERNEL);
Joe Handzik3b51a7a2014-03-26 17:48:11 -05002843 if (physicals == NULL)
2844 return 0;
Scott Teel54b6e9e2014-02-18 13:56:45 -06002845 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2846 reportsize, extended)) {
2847 dev_err(&h->pdev->dev,
2848 "Can't lookup %s device handle: report physical LUNs failed.\n",
2849 "HP SSD Smart Path");
2850 kfree(physicals);
2851 return 0;
2852 }
2853 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2854 responsesize;
2855
Scott Teel54b6e9e2014-02-18 13:56:45 -06002856 /* find ioaccel2 handle in list of physicals: */
2857 for (i = 0; i < nphysicals; i++) {
Stephen M. Camerond5b5d962014-05-29 10:53:34 -05002858 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2859
Scott Teel54b6e9e2014-02-18 13:56:45 -06002860 /* handle is in bytes 28-31 of each lun */
Stephen M. Camerond5b5d962014-05-29 10:53:34 -05002861 if (entry->ioaccel_handle != find)
Scott Teel54b6e9e2014-02-18 13:56:45 -06002862 continue; /* didn't match */
Scott Teel54b6e9e2014-02-18 13:56:45 -06002863 found = 1;
Stephen M. Camerond5b5d962014-05-29 10:53:34 -05002864 memcpy(scsi3addr, entry->lunid, 8);
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06002865 if (h->raid_offload_debug > 0)
2866 dev_info(&h->pdev->dev,
Stephen M. Camerond5b5d962014-05-29 10:53:34 -05002867 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06002868 __func__, find,
Stephen M. Camerond5b5d962014-05-29 10:53:34 -05002869 entry->ioaccel_handle, scsi3addr);
Scott Teel54b6e9e2014-02-18 13:56:45 -06002870 break; /* found it */
2871 }
2872
2873 kfree(physicals);
2874 if (found)
2875 return 1;
2876 else
2877 return 0;
2878
2879}
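/*
 * Illustrative sketch (compiled out) of the handle match above: the
 * extended report returns 24-byte entries, each carrying an 8-byte
 * lunid and the 4-byte ioaccel handle, so the entry count is
 * LUNListLength / 24 and the lookup is a linear scan on the handle
 * field, compared raw just as the driver does above.  The function
 * name is hypothetical.
 */
#if 0
static int example_find_ioaccel2_handle(struct ReportExtendedLUNdata *phys,
					u32 handle, unsigned char *scsi3addr)
{
	u32 nphys = be32_to_cpu(*((__be32 *) phys->LUNListLength)) / 24;
	u32 i;

	for (i = 0; i < nphys; i++) {
		if (phys->LUN[i].ioaccel_handle != handle)
			continue;	/* didn't match */
		memcpy(scsi3addr, phys->LUN[i].lunid, 8);
		return 1;	/* found it */
	}
	return 0;	/* no match */
}
#endif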
2880/*
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002881 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
2882 * logdev. The number of luns in physdev and logdev are returned in
2883 * *nphysicals and *nlogicals, respectively.
2884 * Returns 0 on success, -1 otherwise.
2885 */
2886static int hpsa_gather_lun_info(struct ctlr_info *h,
Stephen M. Cameron92084712014-11-14 17:26:54 -06002887 int reportphyslunsize, int reportloglunsize,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002888 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002889 struct ReportLUNdata *logdev, u32 *nlogicals)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002890{
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002891 int physical_entry_size = 8;
2892
2893 *physical_mode = 0;
2894
2895 /* For I/O accelerator mode we need to read physical device handles */
Mike MIller317d4ad2014-02-18 13:56:20 -06002896 if (h->transMethod & CFGTBL_Trans_io_accel1 ||
2897 h->transMethod & CFGTBL_Trans_io_accel2) {
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002898 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2899 physical_entry_size = 24;
2900 }
Stephen M. Cameron92084712014-11-14 17:26:54 -06002901 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002902 *physical_mode)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002903 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2904 return -1;
2905 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002906 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
2907 physical_entry_size;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002908 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2909 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
2910 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2911 *nphysicals - HPSA_MAX_PHYS_LUN);
2912 *nphysicals = HPSA_MAX_PHYS_LUN;
2913 }
Stephen M. Cameron92084712014-11-14 17:26:54 -06002914 if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002915 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2916 return -1;
2917 }
Stephen M. Cameron6df1e952010-02-04 08:42:19 -06002918 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002919 /* Reject Logicals in excess of our max capability. */
2920 if (*nlogicals > HPSA_MAX_LUN) {
2921 dev_warn(&h->pdev->dev,
2922 "maximum logical LUNs (%d) exceeded. "
2923 "%d LUNs ignored.\n", HPSA_MAX_LUN,
2924 *nlogicals - HPSA_MAX_LUN);
2925 *nlogicals = HPSA_MAX_LUN;
2926 }
2927 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2928 dev_warn(&h->pdev->dev,
2929 "maximum logical + physical LUNs (%d) exceeded. "
2930 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2931 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2932 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2933 }
2934 return 0;
2935}
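/*
 * Worked example for the count math above: a returned LUNListLength of
 * 0x00000060 (big-endian) describes 96 bytes of entries, which is
 * twelve 8-byte entries in plain mode but only four 24-byte entries
 * when HPSA_REPORT_PHYS_EXTENDED is in effect.
 */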
2936
Don Brace42a91642014-11-14 17:26:27 -06002937static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
2938 int i, int nphysicals, int nlogicals,
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002939 struct ReportExtendedLUNdata *physdev_list,
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002940 struct ReportLUNdata *logdev_list)
2941{
2942 /* Helper function: figure out where the LUN ID info is coming from
2943 * given index i, lists of physical and logical devices, where in
2944 * the list the raid controller is supposed to appear (first or last)
2945 */
2946
2947 int logicals_start = nphysicals + (raid_ctlr_position == 0);
2948 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2949
2950 if (i == raid_ctlr_position)
2951 return RAID_CTLR_LUNID;
2952
2953 if (i < logicals_start)
Stephen M. Camerond5b5d962014-05-29 10:53:34 -05002954 return &physdev_list->LUN[i -
2955 (raid_ctlr_position == 0)].lunid[0];
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002956
2957 if (i < last_device)
2958 return &logdev_list->LUN[i - nphysicals -
2959 (raid_ctlr_position == 0)][0];
2960 BUG();
2961 return NULL;
2962}
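/*
 * Worked example for the index layout above: with nphysicals = 2,
 * nlogicals = 3 and the controller reported first
 * (raid_ctlr_position == 0), index 0 is the controller, indices 1-2
 * are physicals and indices 3-5 are logicals.  With the controller
 * last (raid_ctlr_position == 5), indices 0-1 are physicals, 2-4 are
 * logicals and index 5 is the controller.
 */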
2963
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002964static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2965{
2966 int rc;
Joe Handzik6e8e8082014-05-15 15:44:42 -05002967 int hba_mode_enabled;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002968 struct bmic_controller_parameters *ctlr_params;
2969 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
2970 GFP_KERNEL);
2971
2972 if (!ctlr_params)
Joe Handzik96444fb2014-05-15 15:44:47 -05002973 return -ENOMEM;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002974 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
2975 sizeof(struct bmic_controller_parameters));
Joe Handzik96444fb2014-05-15 15:44:47 -05002976 if (rc) {
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002977 kfree(ctlr_params);
Joe Handzik96444fb2014-05-15 15:44:47 -05002978 return rc;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002979 }
Joe Handzik6e8e8082014-05-15 15:44:42 -05002980
2981 hba_mode_enabled =
2982 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
2983 kfree(ctlr_params);
2984 return hba_mode_enabled;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06002985}
2986
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002987static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
2988{
2989 /* the idea here is we could get notified
2990 * that some devices have changed, so we do a report
2991 * physical luns and report logical luns cmd, and adjust
2992 * our list of devices accordingly.
2993 *
2994 * The scsi3addr's of devices won't change so long as the
2995 * adapter is not reset. That means we can rescan and
2996 * tell which devices we already know about, vs. new
2997 * devices, vs. disappearing devices.
2998 */
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002999 struct ReportExtendedLUNdata *physdev_list = NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003000 struct ReportLUNdata *logdev_list = NULL;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003001 u32 nphysicals = 0;
3002 u32 nlogicals = 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003003 int physical_mode = 0;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003004 u32 ndev_allocated = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003005 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3006 int ncurrent = 0;
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003007 int i, n_ext_target_devs, ndevs_to_allocate;
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003008 int raid_ctlr_position;
Joe Handzik2bbf5c72014-05-21 11:16:01 -05003009 int rescan_hba_mode;
Scott Teelaca4a522012-01-19 14:01:19 -06003010 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003011
Scott Teelcfe5bad2011-10-26 16:21:07 -05003012 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
Stephen M. Cameron92084712014-11-14 17:26:54 -06003013 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3014 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003015 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3016
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003017 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003018 dev_err(&h->pdev->dev, "out of memory\n");
3019 goto out;
3020 }
3021 memset(lunzerobits, 0, sizeof(lunzerobits));
3022
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003023 rescan_hba_mode = hpsa_hba_mode_enabled(h);
Joe Handzik96444fb2014-05-15 15:44:47 -05003024 if (rescan_hba_mode < 0)
3025 goto out;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003026
3027 if (!h->hba_mode_enabled && rescan_hba_mode)
3028 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3029 else if (h->hba_mode_enabled && !rescan_hba_mode)
3030 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3031
3032 h->hba_mode_enabled = rescan_hba_mode;
3033
Stephen M. Cameron92084712014-11-14 17:26:54 -06003034 if (hpsa_gather_lun_info(h,
3035 sizeof(*physdev_list), sizeof(*logdev_list),
Matt Gatesa93aa1f2014-02-18 13:55:07 -06003036 (struct ReportLUNdata *) physdev_list, &nphysicals,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003037 &physical_mode, logdev_list, &nlogicals))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003038 goto out;
3039
Scott Teelaca4a522012-01-19 14:01:19 -06003040 /* We might see up to the maximum number of logical and physical disks
3041 * plus external target devices, and a device for the local RAID
3042 * controller.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003043 */
Scott Teelaca4a522012-01-19 14:01:19 -06003044 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003045
3046 /* Allocate the per device structures */
3047 for (i = 0; i < ndevs_to_allocate; i++) {
Scott Teelb7ec0212011-10-26 16:21:12 -05003048 if (i >= HPSA_MAX_DEVICES) {
3049 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3050 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3051 ndevs_to_allocate - HPSA_MAX_DEVICES);
3052 break;
3053 }
3054
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003055 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3056 if (!currentsd[i]) {
3057 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3058 __FILE__, __LINE__);
3059 goto out;
3060 }
3061 ndev_allocated++;
3062 }
3063
Stephen M. Cameron86452912014-05-29 10:53:49 -05003064 if (is_scsi_rev_5(h))
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003065 raid_ctlr_position = 0;
3066 else
3067 raid_ctlr_position = nphysicals + nlogicals;
3068
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003069 /* adjust our table of devices */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003070 n_ext_target_devs = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003071 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003072 u8 *lunaddrbytes, is_OBDR = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003073
3074 /* Figure out where the LUN ID info is coming from */
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003075 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3076 i, nphysicals, nlogicals, physdev_list, logdev_list);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003077 /* skip masked physical devices. */
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003078 if (lunaddrbytes[3] & 0xC0 &&
3079 i < nphysicals + (raid_ctlr_position == 0))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003080 continue;
3081
3082 /* Get device type, vendor, model, device id */
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003083 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3084 &is_OBDR))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003085 continue; /* skip it if we can't talk to it. */
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003086 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003087 this_device = currentsd[ncurrent];
3088
3089 /*
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003090 * For external target devices, we have to insert a LUN 0 which
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003091 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3092 * is nonetheless an enclosure device there. We have to
3093 * present that device; otherwise linux won't find anything if
3094 * there is no lun 0.
3095 */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003096 if (add_ext_target_dev(h, tmpdevice, this_device,
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003097 lunaddrbytes, lunzerobits,
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003098 &n_ext_target_devs)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003099 ncurrent++;
3100 this_device = currentsd[ncurrent];
3101 }
3102
3103 *this_device = *tmpdevice;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003104
3105 switch (this_device->devtype) {
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003106 case TYPE_ROM:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003107 /* We don't *really* support actual CD-ROM devices,
3108 * just a "One Button Disaster Recovery" tape drive
3109 * which temporarily pretends to be a CD-ROM drive.
3110 * So we check that the device is really an OBDR tape
3111 * device by checking for "$DR-10" in bytes 43-48 of
3112 * the inquiry data.
3113 */
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003114 if (is_OBDR)
3115 ncurrent++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003116 break;
3117 case TYPE_DISK:
Stephen M. Cameron316b2212014-02-21 16:25:15 -06003118 if (h->hba_mode_enabled) {
3119 /* never use raid mapper in HBA mode */
3120 this_device->offload_enabled = 0;
3121 ncurrent++;
3122 break;
3123 } else if (h->acciopath_status) {
3124 if (i >= nphysicals) {
3125 ncurrent++;
3126 break;
3127 }
3128 } else {
3129 if (i < nphysicals)
3130 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003131 ncurrent++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003132 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003133 }
3134 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
3135 memcpy(&this_device->ioaccel_handle,
3136 &lunaddrbytes[20],
3137 sizeof(this_device->ioaccel_handle));
3138 ncurrent++;
3139 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003140 break;
3141 case TYPE_TAPE:
3142 case TYPE_MEDIUM_CHANGER:
3143 ncurrent++;
3144 break;
3145 case TYPE_RAID:
3146 /* Only present the Smart Array HBA as a RAID controller.
3147 * If it's a RAID controller other than the HBA itself
3148 * (an external RAID controller, MSA500 or similar)
3149 * don't present it.
3150 */
3151 if (!is_hba_lunid(lunaddrbytes))
3152 break;
3153 ncurrent++;
3154 break;
3155 default:
3156 break;
3157 }
Scott Teelcfe5bad2011-10-26 16:21:07 -05003158 if (ncurrent >= HPSA_MAX_DEVICES)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003159 break;
3160 }
3161 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3162out:
3163 kfree(tmpdevice);
3164 for (i = 0; i < ndev_allocated; i++)
3165 kfree(currentsd[i]);
3166 kfree(currentsd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003167 kfree(physdev_list);
3168 kfree(logdev_list);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003169}
3170
3171/* hpsa_scatter_gather takes a struct scsi_cmnd (cmd), performs the PCI
3172 * DMA mapping, and fills in the scatter-gather entries of the
3173 * hpsa command, cp.
3174 */
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003175static int hpsa_scatter_gather(struct ctlr_info *h,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003176 struct CommandList *cp,
3177 struct scsi_cmnd *cmd)
3178{
3179 unsigned int len;
3180 struct scatterlist *sg;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003181 u64 addr64;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003182 int use_sg, i, sg_index, chained;
3183 struct SGDescriptor *curr_sg;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003184
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003185 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003186
3187 use_sg = scsi_dma_map(cmd);
3188 if (use_sg < 0)
3189 return use_sg;
3190
3191 if (!use_sg)
3192 goto sglist_finished;
3193
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003194 curr_sg = cp->SG;
3195 chained = 0;
3196 sg_index = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003197 scsi_for_each_sg(cmd, sg, use_sg, i) {
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003198 if (i == h->max_cmd_sg_entries - 1 &&
3199 use_sg > h->max_cmd_sg_entries) {
3200 chained = 1;
3201 curr_sg = h->cmd_sg_list[cp->cmdindex];
3202 sg_index = 0;
3203 }
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003204 addr64 = (u64) sg_dma_address(sg);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003205 len = sg_dma_len(sg);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003206 curr_sg->Addr = cpu_to_le64(addr64);
3207 curr_sg->Len = cpu_to_le32(len);
3208 curr_sg->Ext = cpu_to_le32(0);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003209 curr_sg++;
3210 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003211 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003212
3213 if (use_sg + chained > h->maxSG)
3214 h->maxSG = use_sg + chained;
3215
3216 if (chained) {
3217 cp->Header.SGList = h->max_cmd_sg_entries;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003218 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06003219 if (hpsa_map_sg_chain_block(h, cp)) {
3220 scsi_dma_unmap(cmd);
3221 return -1;
3222 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003223 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003224 }
3225
3226sglist_finished:
3227
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003228 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003229 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in this cmd list */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003230 return 0;
3231}
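/*
 * Worked example for the chaining above: with max_cmd_sg_entries = 32
 * and use_sg = 40, the first 31 descriptors stay in the command, the
 * 32nd slot becomes a chain descriptor (filled in by
 * hpsa_map_sg_chain_block), and the remaining 9 data descriptors live
 * in the chain block; SGList is 32 and SGTotal is 40 + 1 = 41 to
 * account for the chain descriptor itself.
 */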
3232
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003233#define IO_ACCEL_INELIGIBLE (1)
3234static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3235{
3236 int is_write = 0;
3237 u32 block;
3238 u32 block_cnt;
3239
3240 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3241 switch (cdb[0]) {
3242 case WRITE_6:
3243 case WRITE_12:
3244 is_write = 1; /* fall through */
3245 case READ_6:
3246 case READ_12:
3247 if (*cdb_len == 6) {
3248 block = (((u32) cdb[2]) << 8) | cdb[3];
3249 block_cnt = cdb[4];
3250 } else {
3251 BUG_ON(*cdb_len != 12);
3252 block = (((u32) cdb[2]) << 24) |
3253 (((u32) cdb[3]) << 16) |
3254 (((u32) cdb[4]) << 8) |
3255 cdb[5];
3256 block_cnt =
3257 (((u32) cdb[6]) << 24) |
3258 (((u32) cdb[7]) << 16) |
3259 (((u32) cdb[8]) << 8) |
3260 cdb[9];
3261 }
3262 if (block_cnt > 0xffff)
3263 return IO_ACCEL_INELIGIBLE;
3264
3265 cdb[0] = is_write ? WRITE_10 : READ_10;
3266 cdb[1] = 0;
3267 cdb[2] = (u8) (block >> 24);
3268 cdb[3] = (u8) (block >> 16);
3269 cdb[4] = (u8) (block >> 8);
3270 cdb[5] = (u8) (block);
3271 cdb[6] = 0;
3272 cdb[7] = (u8) (block_cnt >> 8);
3273 cdb[8] = (u8) (block_cnt);
3274 cdb[9] = 0;
3275 *cdb_len = 10;
3276 break;
3277 }
3278 return 0;
3279}
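/*
 * Worked example for the fixup above: a 6-byte READ of
 * { 0x08, 0x00, 0x12, 0x34, 0x08, 0x00 } (LBA 0x1234, 8 blocks)
 * is rewritten in place to the 10-byte READ
 * { 0x28, 0x00, 0x00, 0x00, 0x12, 0x34, 0x00, 0x00, 0x08, 0x00 }
 * and *cdb_len becomes 10.
 */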
3280
Scott Teelc3497752014-02-18 13:56:34 -06003281static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003282 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3283 u8 *scsi3addr)
Matt Gatese1f7de02014-02-18 13:55:17 -06003284{
3285 struct scsi_cmnd *cmd = c->scsi_cmd;
Matt Gatese1f7de02014-02-18 13:55:17 -06003286 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3287 unsigned int len;
3288 unsigned int total_len = 0;
3289 struct scatterlist *sg;
3290 u64 addr64;
3291 int use_sg, i;
3292 struct SGDescriptor *curr_sg;
3293 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3294
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003295 /* TODO: implement chaining support */
3296 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3297 return IO_ACCEL_INELIGIBLE;
3298
Matt Gatese1f7de02014-02-18 13:55:17 -06003299 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3300
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003301 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3302 return IO_ACCEL_INELIGIBLE;
3303
Matt Gatese1f7de02014-02-18 13:55:17 -06003304 c->cmd_type = CMD_IOACCEL1;
3305
3306 /* Adjust the DMA address to point to the accelerated command buffer */
3307 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3308 (c->cmdindex * sizeof(*cp));
3309 BUG_ON(c->busaddr & 0x0000007F);
3310
3311 use_sg = scsi_dma_map(cmd);
3312 if (use_sg < 0)
3313 return use_sg;
3314
3315 if (use_sg) {
3316 curr_sg = cp->SG;
3317 scsi_for_each_sg(cmd, sg, use_sg, i) {
3318 addr64 = (u64) sg_dma_address(sg);
3319 len = sg_dma_len(sg);
3320 total_len += len;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003321 curr_sg->Addr = cpu_to_le64(addr64);
3322 curr_sg->Len = cpu_to_le32(len);
3323 curr_sg->Ext = cpu_to_le32(0);
Matt Gatese1f7de02014-02-18 13:55:17 -06003324 curr_sg++;
3325 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003326 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
Matt Gatese1f7de02014-02-18 13:55:17 -06003327
3328 switch (cmd->sc_data_direction) {
3329 case DMA_TO_DEVICE:
3330 control |= IOACCEL1_CONTROL_DATA_OUT;
3331 break;
3332 case DMA_FROM_DEVICE:
3333 control |= IOACCEL1_CONTROL_DATA_IN;
3334 break;
3335 case DMA_NONE:
3336 control |= IOACCEL1_CONTROL_NODATAXFER;
3337 break;
3338 default:
3339 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3340 cmd->sc_data_direction);
3341 BUG();
3342 break;
3343 }
3344 } else {
3345 control |= IOACCEL1_CONTROL_NODATAXFER;
3346 }
3347
Scott Teelc3497752014-02-18 13:56:34 -06003348 c->Header.SGList = use_sg;
Matt Gatese1f7de02014-02-18 13:55:17 -06003349 /* Fill out the command structure to submit */
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003350 cp->dev_handle = ioaccel_handle & 0xFFFF;
Matt Gatese1f7de02014-02-18 13:55:17 -06003351 cp->transfer_len = total_len;
3352 cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003353 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
Matt Gatese1f7de02014-02-18 13:55:17 -06003354 cp->control = control;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003355 memcpy(cp->CDB, cdb, cdb_len);
3356 memcpy(cp->CISS_LUN, scsi3addr, 8);
Scott Teelc3497752014-02-18 13:56:34 -06003357 /* Tag was already set at init time. */
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003358 enqueue_cmd_and_start_io(h, c);
Matt Gatese1f7de02014-02-18 13:55:17 -06003359 return 0;
3360}
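/*
 * Worked example for the busaddr math above: the ioaccel1 commands sit
 * in one DMA-coherent pool, so the bus address of command N is
 * pool_dhandle + N * sizeof(struct io_accel1_cmd).  Assuming a
 * 128-byte command size, command 5 lands at offset 640 and the low
 * seven bits checked by the BUG_ON are zero, as the hardware requires.
 */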
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003361
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003362/*
3363 * Queue a command directly to a device behind the controller using the
3364 * I/O accelerator path.
3365 */
3366static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3367 struct CommandList *c)
3368{
3369 struct scsi_cmnd *cmd = c->scsi_cmd;
3370 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3371
3372 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3373 cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
3374}
3375
Scott Teeldd0e19f2014-02-18 13:57:31 -06003376/*
3377 * Set encryption parameters for the ioaccel2 request
3378 */
3379static void set_encrypt_ioaccel2(struct ctlr_info *h,
3380 struct CommandList *c, struct io_accel2_cmd *cp)
3381{
3382 struct scsi_cmnd *cmd = c->scsi_cmd;
3383 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3384 struct raid_map_data *map = &dev->raid_map;
3385 u64 first_block;
3386
3387 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3388
3389 /* Are we doing encryption on this device */
3390 if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
3391 return;
3392 /* Set the data encryption key index. */
3393 cp->dekindex = map->dekindex;
3394
3395 /* Set the encryption enable flag, encoded into direction field. */
3396 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3397
3398 /* Set encryption tweak values based on the logical block address.
3399 * If the block size is 512, the tweak value is the LBA.
3400 * For other block sizes, the tweak is (LBA * block size) / 512.
3401 */
3402 switch (cmd->cmnd[0]) {
3403 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3404 case WRITE_6:
3405 case READ_6:
3406 if (map->volume_blk_size == 512) {
3407 cp->tweak_lower =
3408 (((u32) cmd->cmnd[2]) << 8) |
3409 cmd->cmnd[3];
3410 cp->tweak_upper = 0;
3411 } else {
3412 first_block =
3413 (((u64) cmd->cmnd[2]) << 8) |
3414 cmd->cmnd[3];
3415 first_block = (first_block * map->volume_blk_size)/512;
3416 cp->tweak_lower = (u32)first_block;
3417 cp->tweak_upper = (u32)(first_block >> 32);
3418 }
3419 break;
3420 case WRITE_10:
3421 case READ_10:
3422 if (map->volume_blk_size == 512) {
3423 cp->tweak_lower =
3424 (((u32) cmd->cmnd[2]) << 24) |
3425 (((u32) cmd->cmnd[3]) << 16) |
3426 (((u32) cmd->cmnd[4]) << 8) |
3427 cmd->cmnd[5];
3428 cp->tweak_upper = 0;
3429 } else {
3430 first_block =
3431 (((u64) cmd->cmnd[2]) << 24) |
3432 (((u64) cmd->cmnd[3]) << 16) |
3433 (((u64) cmd->cmnd[4]) << 8) |
3434 cmd->cmnd[5];
3435 first_block = (first_block * map->volume_blk_size)/512;
3436 cp->tweak_lower = (u32)first_block;
3437 cp->tweak_upper = (u32)(first_block >> 32);
3438 }
3439 break;
3440 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3441 case WRITE_12:
3442 case READ_12:
3443 if (map->volume_blk_size == 512) {
3444 cp->tweak_lower =
3445 (((u32) cmd->cmnd[2]) << 24) |
3446 (((u32) cmd->cmnd[3]) << 16) |
3447 (((u32) cmd->cmnd[4]) << 8) |
3448 cmd->cmnd[5];
3449 cp->tweak_upper = 0;
3450 } else {
3451 first_block =
3452 (((u64) cmd->cmnd[2]) << 24) |
3453 (((u64) cmd->cmnd[3]) << 16) |
3454 (((u64) cmd->cmnd[4]) << 8) |
3455 cmd->cmnd[5];
3456 first_block = (first_block * map->volume_blk_size)/512;
3457 cp->tweak_lower = (u32)first_block;
3458 cp->tweak_upper = (u32)(first_block >> 32);
3459 }
3460 break;
3461 case WRITE_16:
3462 case READ_16:
3463 if (map->volume_blk_size == 512) {
3464 cp->tweak_lower =
3465 (((u32) cmd->cmnd[6]) << 24) |
3466 (((u32) cmd->cmnd[7]) << 16) |
3467 (((u32) cmd->cmnd[8]) << 8) |
3468 cmd->cmnd[9];
3469 cp->tweak_upper =
3470 (((u32) cmd->cmnd[2]) << 24) |
3471 (((u32) cmd->cmnd[3]) << 16) |
3472 (((u32) cmd->cmnd[4]) << 8) |
3473 cmd->cmnd[5];
3474 } else {
3475 first_block =
3476 (((u64) cmd->cmnd[2]) << 56) |
3477 (((u64) cmd->cmnd[3]) << 48) |
3478 (((u64) cmd->cmnd[4]) << 40) |
3479 (((u64) cmd->cmnd[5]) << 32) |
3480 (((u64) cmd->cmnd[6]) << 24) |
3481 (((u64) cmd->cmnd[7]) << 16) |
3482 (((u64) cmd->cmnd[8]) << 8) |
3483 cmd->cmnd[9];
3484 first_block = (first_block * map->volume_blk_size)/512;
3485 cp->tweak_lower = (u32)first_block;
3486 cp->tweak_upper = (u32)(first_block >> 32);
3487 }
3488 break;
3489 default:
3490 dev_err(&h->pdev->dev,
3491 "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
3492 __func__);
3493 BUG();
3494 break;
3495 }
3496}
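/*
 * Illustrative sketch (compiled out) of the tweak math above: the
 * encryption tweak is the LBA expressed in 512-byte units.  For a
 * 4096-byte-block volume, LBA 0x1000 yields a tweak of
 * 0x1000 * 4096 / 512 = 0x8000.  The function name is hypothetical.
 */
#if 0
static u64 example_encryption_tweak(u64 lba, u32 volume_blk_size)
{
	if (volume_blk_size == 512)	/* tweak is the LBA itself */
		return lba;
	return (lba * volume_blk_size) / 512; /* scale to 512-byte units */
}
#endif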
3497
Scott Teelc3497752014-02-18 13:56:34 -06003498static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3499 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3500 u8 *scsi3addr)
3501{
3502 struct scsi_cmnd *cmd = c->scsi_cmd;
3503 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3504 struct ioaccel2_sg_element *curr_sg;
3505 int use_sg, i;
3506 struct scatterlist *sg;
3507 u64 addr64;
3508 u32 len;
3509 u32 total_len = 0;
3510
3511 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3512 return IO_ACCEL_INELIGIBLE;
3513
3514 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3515 return IO_ACCEL_INELIGIBLE;
3516 c->cmd_type = CMD_IOACCEL2;
3517 /* Adjust the DMA address to point to the accelerated command buffer */
3518 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3519 (c->cmdindex * sizeof(*cp));
3520 BUG_ON(c->busaddr & 0x0000007F);
3521
3522 memset(cp, 0, sizeof(*cp));
3523 cp->IU_type = IOACCEL2_IU_TYPE;
3524
3525 use_sg = scsi_dma_map(cmd);
3526 if (use_sg < 0)
3527 return use_sg;
3528
3529 if (use_sg) {
3530 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3531 curr_sg = cp->sg;
3532 scsi_for_each_sg(cmd, sg, use_sg, i) {
3533 addr64 = (u64) sg_dma_address(sg);
3534 len = sg_dma_len(sg);
3535 total_len += len;
3536 curr_sg->address = cpu_to_le64(addr64);
3537 curr_sg->length = cpu_to_le32(len);
3538 curr_sg->reserved[0] = 0;
3539 curr_sg->reserved[1] = 0;
3540 curr_sg->reserved[2] = 0;
3541 curr_sg->chain_indicator = 0;
3542 curr_sg++;
3543 }
3544
3545 switch (cmd->sc_data_direction) {
3546 case DMA_TO_DEVICE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06003547 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3548 cp->direction |= IOACCEL2_DIR_DATA_OUT;
Scott Teelc3497752014-02-18 13:56:34 -06003549 break;
3550 case DMA_FROM_DEVICE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06003551 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3552 cp->direction |= IOACCEL2_DIR_DATA_IN;
Scott Teelc3497752014-02-18 13:56:34 -06003553 break;
3554 case DMA_NONE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06003555 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3556 cp->direction |= IOACCEL2_DIR_NO_DATA;
Scott Teelc3497752014-02-18 13:56:34 -06003557 break;
3558 default:
3559 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3560 cmd->sc_data_direction);
3561 BUG();
3562 break;
3563 }
3564 } else {
Scott Teeldd0e19f2014-02-18 13:57:31 -06003565 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3566 cp->direction |= IOACCEL2_DIR_NO_DATA;
Scott Teelc3497752014-02-18 13:56:34 -06003567 }
Scott Teeldd0e19f2014-02-18 13:57:31 -06003568
3569 /* Set encryption parameters, if necessary */
3570 set_encrypt_ioaccel2(h, c, cp);
3571
Scott Teelc3497752014-02-18 13:56:34 -06003572 cp->scsi_nexus = ioaccel_handle;
Scott Teeldd0e19f2014-02-18 13:57:31 -06003573 cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
Scott Teelc3497752014-02-18 13:56:34 -06003574 DIRECT_LOOKUP_BIT;
3575 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
Scott Teelc3497752014-02-18 13:56:34 -06003576
3577 /* fill in sg elements */
3578 cp->sg_count = (u8) use_sg;
3579
3580 cp->data_len = cpu_to_le32(total_len);
3581 cp->err_ptr = cpu_to_le64(c->busaddr +
3582 offsetof(struct io_accel2_cmd, error_data));
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06003583 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
Scott Teelc3497752014-02-18 13:56:34 -06003584
3585 enqueue_cmd_and_start_io(h, c);
3586 return 0;
3587}
3588
3589/*
3590 * Queue a command to the correct I/O accelerator path.
3591 */
3592static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3593 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3594 u8 *scsi3addr)
3595{
3596 if (h->transMethod & CFGTBL_Trans_io_accel1)
3597 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3598 cdb, cdb_len, scsi3addr);
3599 else
3600 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3601 cdb, cdb_len, scsi3addr);
3602}
3603
Scott Teel6b80b182014-02-18 13:56:55 -06003604static void raid_map_helper(struct raid_map_data *map,
3605 int offload_to_mirror, u32 *map_index, u32 *current_group)
3606{
3607 if (offload_to_mirror == 0) {
3608 /* use physical disk in the first mirrored group. */
3609 *map_index %= map->data_disks_per_row;
3610 return;
3611 }
3612 do {
3613 /* determine mirror group that *map_index indicates */
3614 *current_group = *map_index / map->data_disks_per_row;
3615 if (offload_to_mirror == *current_group)
3616 continue;
3617 if (*current_group < (map->layout_map_count - 1)) {
3618 /* select map index from next group */
3619 *map_index += map->data_disks_per_row;
3620 (*current_group)++;
3621 } else {
3622 /* select map index from first group */
3623 *map_index %= map->data_disks_per_row;
3624 *current_group = 0;
3625 }
3626 } while (offload_to_mirror != *current_group);
3627}
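/*
 * Worked example for the rotation above: with data_disks_per_row = 3
 * and layout_map_count = 3 (a triple mirror), a map_index of 7 sits in
 * mirror group 2.  Asking for offload_to_mirror = 1 first wraps the
 * index back to group 0 (map_index 1), then advances one group to land
 * on map_index 4, the same data disk in mirror group 1.
 */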
3628
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003629/*
3630 * Attempt to perform offload RAID mapping for a logical volume I/O.
3631 */
3632static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3633 struct CommandList *c)
3634{
3635 struct scsi_cmnd *cmd = c->scsi_cmd;
3636 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3637 struct raid_map_data *map = &dev->raid_map;
3638 struct raid_map_disk_data *dd = &map->data[0];
3639 int is_write = 0;
3640 u32 map_index;
3641 u64 first_block, last_block;
3642 u32 block_cnt;
3643 u32 blocks_per_row;
3644 u64 first_row, last_row;
3645 u32 first_row_offset, last_row_offset;
3646 u32 first_column, last_column;
Scott Teel6b80b182014-02-18 13:56:55 -06003647 u64 r0_first_row, r0_last_row;
3648 u32 r5or6_blocks_per_row;
3649 u64 r5or6_first_row, r5or6_last_row;
3650 u32 r5or6_first_row_offset, r5or6_last_row_offset;
3651 u32 r5or6_first_column, r5or6_last_column;
3652 u32 total_disks_per_row;
3653 u32 stripesize;
3654 u32 first_group, last_group, current_group;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003655 u32 map_row;
3656 u32 disk_handle;
3657 u64 disk_block;
3658 u32 disk_block_cnt;
3659 u8 cdb[16];
3660 u8 cdb_len;
3661#if BITS_PER_LONG == 32
3662 u64 tmpdiv;
3663#endif
Scott Teel6b80b182014-02-18 13:56:55 -06003664 int offload_to_mirror;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003665
3666 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3667
3668 /* check for valid opcode, get LBA and block count */
3669 switch (cmd->cmnd[0]) {
3670 case WRITE_6:
3671 is_write = 1; /* fall through */
3672 case READ_6:
3673 first_block =
3674 (((u64) cmd->cmnd[2]) << 8) |
3675 cmd->cmnd[3];
3676 block_cnt = cmd->cmnd[4];
Stephen M. Cameron3fa89a02014-07-03 10:18:14 -05003677 if (block_cnt == 0)
3678 block_cnt = 256;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003679 break;
3680 case WRITE_10:
3681 is_write = 1;
3682 case READ_10:
3683 first_block =
3684 (((u64) cmd->cmnd[2]) << 24) |
3685 (((u64) cmd->cmnd[3]) << 16) |
3686 (((u64) cmd->cmnd[4]) << 8) |
3687 cmd->cmnd[5];
3688 block_cnt =
3689 (((u32) cmd->cmnd[7]) << 8) |
3690 cmd->cmnd[8];
3691 break;
3692 case WRITE_12:
3693 is_write = 1; /* fall through */
3694 case READ_12:
3695 first_block =
3696 (((u64) cmd->cmnd[2]) << 24) |
3697 (((u64) cmd->cmnd[3]) << 16) |
3698 (((u64) cmd->cmnd[4]) << 8) |
3699 cmd->cmnd[5];
3700 block_cnt =
3701 (((u32) cmd->cmnd[6]) << 24) |
3702 (((u32) cmd->cmnd[7]) << 16) |
3703 (((u32) cmd->cmnd[8]) << 8) |
3704 cmd->cmnd[9];
3705 break;
3706 case WRITE_16:
3707 is_write = 1;
3708 case READ_16:
3709 first_block =
3710 (((u64) cmd->cmnd[2]) << 56) |
3711 (((u64) cmd->cmnd[3]) << 48) |
3712 (((u64) cmd->cmnd[4]) << 40) |
3713 (((u64) cmd->cmnd[5]) << 32) |
3714 (((u64) cmd->cmnd[6]) << 24) |
3715 (((u64) cmd->cmnd[7]) << 16) |
3716 (((u64) cmd->cmnd[8]) << 8) |
3717 cmd->cmnd[9];
3718 block_cnt =
3719 (((u32) cmd->cmnd[10]) << 24) |
3720 (((u32) cmd->cmnd[11]) << 16) |
3721 (((u32) cmd->cmnd[12]) << 8) |
3722 cmd->cmnd[13];
3723 break;
3724 default:
3725 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3726 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003727 last_block = first_block + block_cnt - 1;
3728
3729 /* check for write to non-RAID-0 */
3730 if (is_write && dev->raid_level != 0)
3731 return IO_ACCEL_INELIGIBLE;
3732
3733 /* check for invalid block or wraparound */
3734 if (last_block >= map->volume_blk_cnt || last_block < first_block)
3735 return IO_ACCEL_INELIGIBLE;
3736
3737 /* calculate stripe information for the request */
3738 blocks_per_row = map->data_disks_per_row * map->strip_size;
3739#if BITS_PER_LONG == 32
3740 tmpdiv = first_block;
3741 (void) do_div(tmpdiv, blocks_per_row);
3742 first_row = tmpdiv;
3743 tmpdiv = last_block;
3744 (void) do_div(tmpdiv, blocks_per_row);
3745 last_row = tmpdiv;
3746 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3747 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3748 tmpdiv = first_row_offset;
3749 (void) do_div(tmpdiv, map->strip_size);
3750 first_column = tmpdiv;
3751 tmpdiv = last_row_offset;
3752 (void) do_div(tmpdiv, map->strip_size);
3753 last_column = tmpdiv;
3754#else
3755 first_row = first_block / blocks_per_row;
3756 last_row = last_block / blocks_per_row;
3757 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3758 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3759 first_column = first_row_offset / map->strip_size;
3760 last_column = last_row_offset / map->strip_size;
3761#endif
3762
3763 /* if this isn't a single row/column then give to the controller */
3764 if ((first_row != last_row) || (first_column != last_column))
3765 return IO_ACCEL_INELIGIBLE;
3766
3767 /* proceeding with driver mapping */
Scott Teel6b80b182014-02-18 13:56:55 -06003768 total_disks_per_row = map->data_disks_per_row +
3769 map->metadata_disks_per_row;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003770 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3771 map->row_cnt;
Scott Teel6b80b182014-02-18 13:56:55 -06003772 map_index = (map_row * total_disks_per_row) + first_column;
3773
3774 switch (dev->raid_level) {
3775 case HPSA_RAID_0:
3776 break; /* nothing special to do */
3777 case HPSA_RAID_1:
3778 /* Handles load balance across RAID 1 members.
3779 * (2-drive R1 and R10 with even # of drives.)
3780 * Appropriate for SSDs, not optimal for HDDs.
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003781 */
Scott Teel6b80b182014-02-18 13:56:55 -06003782 BUG_ON(map->layout_map_count != 2);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003783 if (dev->offload_to_mirror)
3784 map_index += map->data_disks_per_row;
3785 dev->offload_to_mirror = !dev->offload_to_mirror;
Scott Teel6b80b182014-02-18 13:56:55 -06003786 break;
3787 case HPSA_RAID_ADM:
3788 /* Handles N-way mirrors (R1-ADM)
3789 * and R10 with # of drives divisible by 3.
3790 */
3791 BUG_ON(map->layout_map_count != 3);
3792
3793 offload_to_mirror = dev->offload_to_mirror;
3794 raid_map_helper(map, offload_to_mirror,
3795 &map_index, &current_group);
3796 /* set mirror group to use next time */
3797 offload_to_mirror =
3798 (offload_to_mirror >= map->layout_map_count - 1)
3799 ? 0 : offload_to_mirror + 1;
Scott Teel6b80b182014-02-18 13:56:55 -06003800 dev->offload_to_mirror = offload_to_mirror;
3801 /* Avoid direct use of dev->offload_to_mirror within this
3802 * function since multiple threads might simultaneously
3803 * increment it beyond the range of map->layout_map_count - 1.
3804 */
3805 break;
3806 case HPSA_RAID_5:
3807 case HPSA_RAID_6:
3808 if (map->layout_map_count <= 1)
3809 break;
3810
3811 /* Verify first and last block are in same RAID group */
3812 r5or6_blocks_per_row =
3813 map->strip_size * map->data_disks_per_row;
3814 BUG_ON(r5or6_blocks_per_row == 0);
3815 stripesize = r5or6_blocks_per_row * map->layout_map_count;
3816#if BITS_PER_LONG == 32
3817 tmpdiv = first_block;
3818 first_group = do_div(tmpdiv, stripesize);
3819 tmpdiv = first_group;
3820 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3821 first_group = tmpdiv;
3822 tmpdiv = last_block;
3823 last_group = do_div(tmpdiv, stripesize);
3824 tmpdiv = last_group;
3825 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3826 last_group = tmpdiv;
3827#else
3828 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3829 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
Scott Teel6b80b182014-02-18 13:56:55 -06003830#endif
Stephen M. Cameron000ff7c2014-03-13 17:12:50 -05003831 if (first_group != last_group)
Scott Teel6b80b182014-02-18 13:56:55 -06003832 return IO_ACCEL_INELIGIBLE;
3833
3834 /* Verify request is in a single row of RAID 5/6 */
3835#if BITS_PER_LONG == 32
3836 tmpdiv = first_block;
3837 (void) do_div(tmpdiv, stripesize);
3838 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3839 tmpdiv = last_block;
3840 (void) do_div(tmpdiv, stripesize);
3841 r5or6_last_row = r0_last_row = tmpdiv;
3842#else
3843 first_row = r5or6_first_row = r0_first_row =
3844 first_block / stripesize;
3845 r5or6_last_row = r0_last_row = last_block / stripesize;
3846#endif
3847 if (r5or6_first_row != r5or6_last_row)
3848 return IO_ACCEL_INELIGIBLE;
3849
3850
3851 /* Verify request is in a single column */
3852#if BITS_PER_LONG == 32
3853 tmpdiv = first_block;
3854 first_row_offset = do_div(tmpdiv, stripesize);
3855 tmpdiv = first_row_offset;
3856 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3857 r5or6_first_row_offset = first_row_offset;
3858 tmpdiv = last_block;
3859 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3860 tmpdiv = r5or6_last_row_offset;
3861 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3862 tmpdiv = r5or6_first_row_offset;
3863 (void) do_div(tmpdiv, map->strip_size);
3864 first_column = r5or6_first_column = tmpdiv;
3865 tmpdiv = r5or6_last_row_offset;
3866 (void) do_div(tmpdiv, map->strip_size);
3867 r5or6_last_column = tmpdiv;
3868#else
3869 first_row_offset = r5or6_first_row_offset =
3870 (u32)((first_block % stripesize) %
3871 r5or6_blocks_per_row);
3872
3873 r5or6_last_row_offset =
3874 (u32)((last_block % stripesize) %
3875 r5or6_blocks_per_row);
3876
3877 first_column = r5or6_first_column =
3878 r5or6_first_row_offset / map->strip_size;
3879 r5or6_last_column =
3880 r5or6_last_row_offset / map->strip_size;
3881#endif
3882 if (r5or6_first_column != r5or6_last_column)
3883 return IO_ACCEL_INELIGIBLE;
3884
3885 /* Request is eligible */
3886 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3887 map->row_cnt;
3888
3889 map_index = (first_group *
3890 (map->row_cnt * total_disks_per_row)) +
3891 (map_row * total_disks_per_row) + first_column;
3892 break;
3893 default:
3894 return IO_ACCEL_INELIGIBLE;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003895 }
Scott Teel6b80b182014-02-18 13:56:55 -06003896
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003897 disk_handle = dd[map_index].ioaccel_handle;
3898 disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
3899 (first_row_offset - (first_column * map->strip_size));
3900 disk_block_cnt = block_cnt;
3901
3902 /* handle differing logical/physical block sizes */
3903 if (map->phys_blk_shift) {
3904 disk_block <<= map->phys_blk_shift;
3905 disk_block_cnt <<= map->phys_blk_shift;
3906 }
3907 BUG_ON(disk_block_cnt > 0xffff);
3908
3909 /* build the new CDB for the physical disk I/O */
3910 if (disk_block > 0xffffffff) {
3911 cdb[0] = is_write ? WRITE_16 : READ_16;
3912 cdb[1] = 0;
3913 cdb[2] = (u8) (disk_block >> 56);
3914 cdb[3] = (u8) (disk_block >> 48);
3915 cdb[4] = (u8) (disk_block >> 40);
3916 cdb[5] = (u8) (disk_block >> 32);
3917 cdb[6] = (u8) (disk_block >> 24);
3918 cdb[7] = (u8) (disk_block >> 16);
3919 cdb[8] = (u8) (disk_block >> 8);
3920 cdb[9] = (u8) (disk_block);
3921 cdb[10] = (u8) (disk_block_cnt >> 24);
3922 cdb[11] = (u8) (disk_block_cnt >> 16);
3923 cdb[12] = (u8) (disk_block_cnt >> 8);
3924 cdb[13] = (u8) (disk_block_cnt);
3925 cdb[14] = 0;
3926 cdb[15] = 0;
3927 cdb_len = 16;
3928 } else {
3929 cdb[0] = is_write ? WRITE_10 : READ_10;
3930 cdb[1] = 0;
3931 cdb[2] = (u8) (disk_block >> 24);
3932 cdb[3] = (u8) (disk_block >> 16);
3933 cdb[4] = (u8) (disk_block >> 8);
3934 cdb[5] = (u8) (disk_block);
3935 cdb[6] = 0;
3936 cdb[7] = (u8) (disk_block_cnt >> 8);
3937 cdb[8] = (u8) (disk_block_cnt);
3938 cdb[9] = 0;
3939 cdb_len = 10;
3940 }
3941 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3942 dev->scsi3addr);
3943}
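/*
 * Illustrative sketch (compiled out) of the RAID-0 portion of the
 * mapping above, ignoring parity rotation, disk_starting_blk, and the
 * RAID 1/ADM/5/6 special cases.  With strip_size = 128 and 4 data
 * disks per row, a request for first_block = 1000, block_cnt = 8 maps
 * to row 1, column 3, and a disk-relative block of
 * 1 * 128 + 104 = 232.  The function name is hypothetical.
 */
#if 0
static int example_raid0_map(u64 first_block, u32 block_cnt,
			     u32 strip_size, u32 data_disks_per_row,
			     u32 *column, u64 *disk_block)
{
	u32 blocks_per_row = data_disks_per_row * strip_size;
	u64 last_block = first_block + block_cnt - 1;
	u64 first_row = first_block / blocks_per_row;
	u64 last_row = last_block / blocks_per_row;
	u32 first_off = (u32) (first_block - first_row * blocks_per_row);
	u32 last_off = (u32) (last_block - last_row * blocks_per_row);

	/* requests spanning a row or strip boundary go to the firmware */
	if (first_row != last_row ||
	    first_off / strip_size != last_off / strip_size)
		return IO_ACCEL_INELIGIBLE;

	*column = first_off / strip_size;
	*disk_block = first_row * strip_size + first_off % strip_size;
	return 0;
}
#endif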
3944
Jeff Garzikf2812332010-11-16 02:10:29 -05003945static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003946 void (*done)(struct scsi_cmnd *))
3947{
3948 struct ctlr_info *h;
3949 struct hpsa_scsi_dev_t *dev;
3950 unsigned char scsi3addr[8];
3951 struct CommandList *c;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003952 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003953
3954 /* Get the ptr to our adapter structure out of cmd->host. */
3955 h = sdev_to_hba(cmd->device);
3956 dev = cmd->device->hostdata;
3957 if (!dev) {
3958 cmd->result = DID_NO_CONNECT << 16;
3959 done(cmd);
3960 return 0;
3961 }
3962 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3963
Stephen M. Cameron094963d2014-05-29 10:53:18 -05003964 if (unlikely(lockup_detected(h))) {
Stephen M. Camerona0c12412011-10-26 16:22:04 -05003965 cmd->result = DID_ERROR << 16;
3966 done(cmd);
3967 return 0;
3968 }
Matt Gatese16a33a2012-05-01 11:43:11 -05003969 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003970 if (c == NULL) { /* trouble... */
3971 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
3972 return SCSI_MLQUEUE_HOST_BUSY;
3973 }
3974
3975 /* Fill in the command list header */
3976
3977 cmd->scsi_done = done; /* save this for use by completion code */
3978
3979 /* save c in case we have to abort it */
3980 cmd->host_scribble = (unsigned char *) c;
3981
3982 c->cmd_type = CMD_SCSI;
3983 c->scsi_cmd = cmd;
Matt Gatese1f7de02014-02-18 13:55:17 -06003984
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003985 /* Call alternate submit routine for I/O accelerated commands.
3986 * Retries always go down the normal I/O path.
3987 */
3988 if (likely(cmd->retries == 0 &&
Scott Teelda0697b2014-02-18 13:57:00 -06003989 cmd->request->cmd_type == REQ_TYPE_FS &&
3990 h->acciopath_status)) {
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003991 if (dev->offload_enabled) {
3992 rc = hpsa_scsi_ioaccel_raid_map(h, c);
3993 if (rc == 0)
3994 return 0; /* Sent on ioaccel path */
3995 if (rc < 0) { /* scsi_dma_map failed. */
3996 cmd_free(h, c);
3997 return SCSI_MLQUEUE_HOST_BUSY;
3998 }
3999 } else if (dev->ioaccel_handle) {
4000 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4001 if (rc == 0)
4002 return 0; /* Sent on direct map path */
4003 if (rc < 0) { /* scsi_dma_map failed. */
4004 cmd_free(h, c);
4005 return SCSI_MLQUEUE_HOST_BUSY;
4006 }
4007 }
4008 }
Matt Gatese1f7de02014-02-18 13:55:17 -06004009
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004010 c->Header.ReplyQueue = 0; /* unused in simple mode */
4011 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004012 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) |
4013 DIRECT_LOOKUP_BIT);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004014
4015 /* Fill in the request block... */
4016
4017 c->Request.Timeout = 0;
4018 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4019 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4020 c->Request.CDBLen = cmd->cmd_len;
4021 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4022 c->Request.Type.Type = TYPE_CMD;
4023 c->Request.Type.Attribute = ATTR_SIMPLE;
4024 switch (cmd->sc_data_direction) {
4025 case DMA_TO_DEVICE:
4026 c->Request.Type.Direction = XFER_WRITE;
4027 break;
4028 case DMA_FROM_DEVICE:
4029 c->Request.Type.Direction = XFER_READ;
4030 break;
4031 case DMA_NONE:
4032 c->Request.Type.Direction = XFER_NONE;
4033 break;
4034 case DMA_BIDIRECTIONAL:
4035 /* This can happen if a buggy application does a scsi passthru
4036 * and sets both inlen and outlen to non-zero. (See
4037 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
4038 */
4039
4040 c->Request.Type.Direction = XFER_RSVD;
4041 /* This is technically wrong, and hpsa controllers should
4042 * reject it with CMD_INVALID, which is the most correct
4043 * response, but non-fibre backends appear to let it
4044 * slide by, and give the same results as if this field
4045 * were set correctly. Either way is acceptable for
4046 * our purposes here.
4047 */
4048
4049 break;
4050
4051 default:
4052 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4053 cmd->sc_data_direction);
4054 BUG();
4055 break;
4056 }
4057
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06004058 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004059 cmd_free(h, c);
4060 return SCSI_MLQUEUE_HOST_BUSY;
4061 }
4062 enqueue_cmd_and_start_io(h, c);
4063 /* the cmd'll come back via intr handler in complete_scsi_command() */
4064 return 0;
4065}
4066
Jeff Garzikf2812332010-11-16 02:10:29 -05004067static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
4068
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004069static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4070{
4071 unsigned long flags;
4072
4073 /*
4074 * Don't let rescans be initiated on a controller known
4075 * to be locked up. If the controller locks up *during*
4076 * a rescan, that thread is probably hosed, but at least
4077 * we can prevent new rescan threads from piling up on a
4078 * locked up controller.
4079 */
Stephen M. Cameron094963d2014-05-29 10:53:18 -05004080 if (unlikely(lockup_detected(h))) {
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004081 spin_lock_irqsave(&h->scan_lock, flags);
4082 h->scan_finished = 1;
4083 wake_up_all(&h->scan_wait_queue);
4084 spin_unlock_irqrestore(&h->scan_lock, flags);
4085 return 1;
4086 }
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004087 return 0;
4088}
4089
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004090static void hpsa_scan_start(struct Scsi_Host *sh)
4091{
4092 struct ctlr_info *h = shost_to_hba(sh);
4093 unsigned long flags;
4094
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004095 if (do_not_scan_if_controller_locked_up(h))
4096 return;
4097
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004098 /* wait until any scan already in progress is finished. */
4099 while (1) {
4100 spin_lock_irqsave(&h->scan_lock, flags);
4101 if (h->scan_finished)
4102 break;
4103 spin_unlock_irqrestore(&h->scan_lock, flags);
4104 wait_event(h->scan_wait_queue, h->scan_finished);
4105 /* Note: We don't need to worry about a race between this
4106 * thread and driver unload because the midlayer will
4107 * have incremented the reference count, so unload won't
4108 * happen if we're in here.
4109 */
4110 }
4111 h->scan_finished = 0; /* mark scan as in progress */
4112 spin_unlock_irqrestore(&h->scan_lock, flags);
4113
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004114 if (do_not_scan_if_controller_locked_up(h))
4115 return;
4116
Stephen M. Camerona08a8472010-02-04 08:43:16 -06004117 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4118
4119 spin_lock_irqsave(&h->scan_lock, flags);
4120 h->scan_finished = 1; /* mark scan as finished. */
4121 wake_up_all(&h->scan_wait_queue);
4122 spin_unlock_irqrestore(&h->scan_lock, flags);
4123}
4124
4125static int hpsa_scan_finished(struct Scsi_Host *sh,
4126 unsigned long elapsed_time)
4127{
4128 struct ctlr_info *h = shost_to_hba(sh);
4129 unsigned long flags;
4130 int finished;
4131
4132 spin_lock_irqsave(&h->scan_lock, flags);
4133 finished = h->scan_finished;
4134 spin_unlock_irqrestore(&h->scan_lock, flags);
4135 return finished;
4136}
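/*
 * Note: these two callbacks implement the midlayer's asynchronous scan
 * protocol: scsi_scan_host() calls ->scan_start() and then polls
 * ->scan_finished() (roughly every 10 ms in this era's scsi_scan.c)
 * until it returns nonzero, so hpsa_scan_finished() must never block.
 */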
4137
Stephen M. Cameron667e23d2010-02-25 14:02:51 -06004138static int hpsa_change_queue_depth(struct scsi_device *sdev,
4139 int qdepth, int reason)
4140{
4141 struct ctlr_info *h = sdev_to_hba(sdev);
4142
4143 if (reason != SCSI_QDEPTH_DEFAULT)
4144 return -ENOTSUPP;
4145
4146 if (qdepth < 1)
4147 qdepth = 1;
4148 else
4149 if (qdepth > h->nr_cmds)
4150 qdepth = h->nr_cmds;
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01004151 scsi_adjust_queue_depth(sdev, qdepth);
Stephen M. Cameron667e23d2010-02-25 14:02:51 -06004152 return sdev->queue_depth;
4153}
4154
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004155static void hpsa_unregister_scsi(struct ctlr_info *h)
4156{
4157 /* we are being forcibly unloaded, and may not refuse. */
4158 scsi_remove_host(h->scsi_host);
4159 scsi_host_put(h->scsi_host);
4160 h->scsi_host = NULL;
4161}
4162
4163static int hpsa_register_scsi(struct ctlr_info *h)
4164{
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004165 struct Scsi_Host *sh;
4166 int error;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004167
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004168 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4169 if (sh == NULL)
4170 goto fail;
4171
4172 sh->io_port = 0;
4173 sh->n_io_port = 0;
4174 sh->this_id = -1;
4175 sh->max_channel = 3;
4176 sh->max_cmd_len = MAX_COMMAND_SIZE;
4177 sh->max_lun = HPSA_MAX_LUN;
4178 sh->max_id = HPSA_MAX_LUN;
4179 sh->can_queue = h->nr_cmds;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06004180 if (h->hba_mode_enabled)
4181 sh->cmd_per_lun = 7;
4182 else
4183 sh->cmd_per_lun = h->nr_cmds;
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004184 sh->sg_tablesize = h->maxsgentries;
4185 h->scsi_host = sh;
4186 sh->hostdata[0] = (unsigned long) h;
4187 sh->irq = h->intr[h->intr_mode];
4188 sh->unique_id = sh->irq;
4189 error = scsi_add_host(sh, &h->pdev->dev);
4190 if (error)
4191 goto fail_host_put;
4192 scsi_scan_host(sh);
4193 return 0;
4194
4195 fail_host_put:
4196 dev_err(&h->pdev->dev, "%s: scsi_add_host"
4197 " failed for controller %d\n", __func__, h->ctlr);
4198 scsi_host_put(sh);
4199 return error;
4200 fail:
4201 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
4202 " failed for controller %d\n", __func__, h->ctlr);
4203 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004204}
4205
4206static int wait_for_device_to_become_ready(struct ctlr_info *h,
4207 unsigned char lunaddr[])
4208{
Tomas Henzl89193582014-02-21 16:25:05 -06004209 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004210 int count = 0;
4211 int waittime = 1; /* seconds */
4212 struct CommandList *c;
4213
4214 c = cmd_special_alloc(h);
4215 if (!c) {
4216 dev_warn(&h->pdev->dev, "out of memory in "
4217 "wait_for_device_to_become_ready.\n");
4218 return IO_ERROR;
4219 }
4220
4221 /* Send test unit ready until device ready, or give up. */
4222 while (count < HPSA_TUR_RETRY_LIMIT) {
4223
4224		/* Wait for a bit. Do this first, because if we send
4225 * the TUR right away, the reset will just abort it.
4226 */
4227 msleep(1000 * waittime);
4228 count++;
Tomas Henzl89193582014-02-21 16:25:05 -06004229 rc = 0; /* Device ready. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004230
4231 /* Increase wait time with each try, up to a point. */
4232 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4233 waittime = waittime * 2;
4234
Stephen M. Camerona2dac132013-02-20 11:24:41 -06004235 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4236 (void) fill_cmd(c, TEST_UNIT_READY, h,
4237 NULL, 0, 0, lunaddr, TYPE_CMD);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004238 hpsa_scsi_do_simple_cmd_core(h, c);
4239 /* no unmap needed here because no data xfer. */
4240
4241 if (c->err_info->CommandStatus == CMD_SUCCESS)
4242 break;
4243
4244 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4245 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4246 (c->err_info->SenseInfo[2] == NO_SENSE ||
4247 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4248 break;
4249
4250 dev_warn(&h->pdev->dev, "waiting %d secs "
4251 "for device to become ready.\n", waittime);
4252 rc = 1; /* device not ready. */
4253 }
4254
4255 if (rc)
4256 dev_warn(&h->pdev->dev, "giving up on device.\n");
4257 else
4258 dev_warn(&h->pdev->dev, "device is ready.\n");
4259
4260 cmd_special_free(h, c);
4261 return rc;
4262}
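/*
 * Retry cadence of the loop above, by example: TUR attempts are spaced
 * 1, 2, 4, 8, ... seconds apart, with the interval capped at
 * HPSA_MAX_WAIT_INTERVAL_SECS, for at most HPSA_TUR_RETRY_LIMIT tries.
 */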
4263
4264/* Need at least one of these error handlers to keep ../scsi/hosts.c from
4265 * complaining. Doing a host- or bus-reset can't do anything good here.
4266 */
4267static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4268{
4269 int rc;
4270 struct ctlr_info *h;
4271 struct hpsa_scsi_dev_t *dev;
4272
4273 /* find the controller to which the command to be aborted was sent */
4274 h = sdev_to_hba(scsicmd->device);
4275 if (h == NULL) /* paranoia */
4276 return FAILED;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004277 dev = scsicmd->device->hostdata;
4278 if (!dev) {
4279 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4280 "device lookup failed.\n");
4281 return FAILED;
4282 }
Stephen M. Camerond416b0c2010-02-04 08:43:21 -06004283 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4284 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004285 /* send a reset to the SCSI LUN which the command was sent to */
Scott Teelbf711ac2014-02-18 13:56:39 -06004286 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004287 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4288 return SUCCESS;
4289
4290 dev_warn(&h->pdev->dev, "resetting device failed.\n");
4291 return FAILED;
4292}
4293
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004294static void swizzle_abort_tag(u8 *tag)
4295{
4296 u8 original_tag[8];
4297
4298 memcpy(original_tag, tag, 8);
4299 tag[0] = original_tag[3];
4300 tag[1] = original_tag[2];
4301 tag[2] = original_tag[1];
4302 tag[3] = original_tag[0];
4303 tag[4] = original_tag[7];
4304 tag[5] = original_tag[6];
4305 tag[6] = original_tag[5];
4306 tag[7] = original_tag[4];
4307}
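/*
 * Worked example: swizzle_abort_tag() byte-reverses each 4-byte half of
 * the 8-byte tag independently, so a tag of
 *
 *	01 02 03 04 05 06 07 08
 *
 * becomes
 *
 *	04 03 02 01 08 07 06 05
 *
 * i.e. the equivalent of swab32() applied to each half in place.
 */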
4308
Scott Teel17eb87d2014-02-18 13:55:28 -06004309static void hpsa_get_tag(struct ctlr_info *h,
4310 struct CommandList *c, u32 *taglower, u32 *tagupper)
4311{
4312 if (c->cmd_type == CMD_IOACCEL1) {
4313 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4314 &h->ioaccel_cmd_pool[c->cmdindex];
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004315 *tagupper = (u32) (cm1->tag >> 32);
4316 *taglower = (u32) (cm1->tag & 0x0ffffffffULL);
Scott Teel54b6e9e2014-02-18 13:56:45 -06004317 return;
Scott Teel17eb87d2014-02-18 13:55:28 -06004318 }
Scott Teel54b6e9e2014-02-18 13:56:45 -06004319 if (c->cmd_type == CMD_IOACCEL2) {
4320 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4321 &h->ioaccel2_cmd_pool[c->cmdindex];
Scott Teeldd0e19f2014-02-18 13:57:31 -06004322 /* upper tag not used in ioaccel2 mode */
4323 memset(tagupper, 0, sizeof(*tagupper));
4324 *taglower = cm2->Tag;
Scott Teel54b6e9e2014-02-18 13:56:45 -06004325 return;
4326 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004327 *tagupper = (u32) (c->Header.tag >> 32);
4328 *taglower = (u32) (c->Header.tag & 0x0ffffffffULL);
Scott Teel17eb87d2014-02-18 13:55:28 -06004329}
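/*
 * Worked example: for a tag value of 0x1122334455667788, the code above
 * yields *tagupper == 0x11223344 and *taglower == 0x55667788; ioaccel2
 * commands only carry the low 32 bits, so tagupper is zeroed for them.
 */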
4330
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004331static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004332 struct CommandList *abort, int swizzle)
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004333{
4334 int rc = IO_OK;
4335 struct CommandList *c;
4336 struct ErrorInfo *ei;
Scott Teel17eb87d2014-02-18 13:55:28 -06004337 u32 tagupper, taglower;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004338
4339 c = cmd_special_alloc(h);
4340 if (c == NULL) { /* trouble... */
4341 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4342 return -ENOMEM;
4343 }
4344
Stephen M. Camerona2dac132013-02-20 11:24:41 -06004345 /* fill_cmd can't fail here, no buffer to map */
4346 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4347 0, 0, scsi3addr, TYPE_MSG);
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004348 if (swizzle)
4349 swizzle_abort_tag(&c->Request.CDB[4]);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004350 hpsa_scsi_do_simple_cmd_core(h, c);
Scott Teel17eb87d2014-02-18 13:55:28 -06004351 hpsa_get_tag(h, abort, &taglower, &tagupper);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004352 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
Scott Teel17eb87d2014-02-18 13:55:28 -06004353 __func__, tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004354 /* no unmap needed here because no data xfer. */
4355
4356 ei = c->err_info;
4357 switch (ei->CommandStatus) {
4358 case CMD_SUCCESS:
4359 break;
4360 case CMD_UNABORTABLE: /* Very common, don't make noise. */
4361 rc = -1;
4362 break;
4363 default:
4364 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
Scott Teel17eb87d2014-02-18 13:55:28 -06004365 __func__, tagupper, taglower);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06004366 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004367 rc = -1;
4368 break;
4369 }
4370 cmd_special_free(h, c);
Scott Teeldd0e19f2014-02-18 13:57:31 -06004371 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4372 __func__, tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004373 return rc;
4374}
4375
4376/*
4377 * hpsa_find_cmd_in_queue
4378 *
4379 * Used to determine whether a command (find) is still present
4380 * in queue_head.
4381 *
4382 * This is used to avoid unnecessary aborts. Commands in h->reqQ have
4383 * not yet been submitted, and so can be aborted by the driver without
4384 * sending an abort to the hardware.
4385 *
4386 * Returns pointer to command if found in queue, NULL otherwise.
4387 */
4388static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
4389 struct scsi_cmnd *find, struct list_head *queue_head)
4390{
4391 unsigned long flags;
4392 struct CommandList *c = NULL; /* ptr into cmpQ */
4393
4394 if (!find)
Don Brace42a91642014-11-14 17:26:27 -06004395 return NULL;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004396 spin_lock_irqsave(&h->lock, flags);
4397 list_for_each_entry(c, queue_head, list) {
4398 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
4399 continue;
4400 if (c->scsi_cmd == find) {
4401 spin_unlock_irqrestore(&h->lock, flags);
4402 return c;
4403 }
4404 }
4405 spin_unlock_irqrestore(&h->lock, flags);
4406 return NULL;
4407}
4408
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004409static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
4410 u8 *tag, struct list_head *queue_head)
4411{
4412 unsigned long flags;
4413 struct CommandList *c;
4414
4415 spin_lock_irqsave(&h->lock, flags);
4416 list_for_each_entry(c, queue_head, list) {
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004417 if (memcmp(&c->Header.tag, tag, 8) != 0)
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004418 continue;
4419 spin_unlock_irqrestore(&h->lock, flags);
4420 return c;
4421 }
4422 spin_unlock_irqrestore(&h->lock, flags);
4423 return NULL;
4424}
4425
Scott Teel54b6e9e2014-02-18 13:56:45 -06004426/* ioaccel2 path firmware cannot handle abort task requests.
4427 * Change abort requests to physical target reset, and send to the
4428 * address of the physical disk used for the ioaccel 2 command.
4429 * Return 0 on success (IO_OK)
4430 * -1 on failure
4431 */
4432
4433static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4434 unsigned char *scsi3addr, struct CommandList *abort)
4435{
4436 int rc = IO_OK;
4437 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4438 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4439 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4440 unsigned char *psa = &phys_scsi3addr[0];
4441
4442 /* Get a pointer to the hpsa logical device. */
4443 scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4444 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4445 if (dev == NULL) {
4446 dev_warn(&h->pdev->dev,
4447 "Cannot abort: no device pointer for command.\n");
4448 return -1; /* not abortable */
4449 }
4450
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06004451 if (h->raid_offload_debug > 0)
4452 dev_info(&h->pdev->dev,
4453 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4454 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4455 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4456 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4457
Scott Teel54b6e9e2014-02-18 13:56:45 -06004458 if (!dev->offload_enabled) {
4459 dev_warn(&h->pdev->dev,
4460 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4461 return -1; /* not abortable */
4462 }
4463
4464 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4465 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4466 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4467 return -1; /* not abortable */
4468 }
4469
4470 /* send the reset */
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06004471 if (h->raid_offload_debug > 0)
4472 dev_info(&h->pdev->dev,
4473 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4474 psa[0], psa[1], psa[2], psa[3],
4475 psa[4], psa[5], psa[6], psa[7]);
Scott Teel54b6e9e2014-02-18 13:56:45 -06004476 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4477 if (rc != 0) {
4478 dev_warn(&h->pdev->dev,
4479 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4480 psa[0], psa[1], psa[2], psa[3],
4481 psa[4], psa[5], psa[6], psa[7]);
4482 return rc; /* failed to reset */
4483 }
4484
4485 /* wait for device to recover */
4486 if (wait_for_device_to_become_ready(h, psa) != 0) {
4487 dev_warn(&h->pdev->dev,
4488 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4489 psa[0], psa[1], psa[2], psa[3],
4490 psa[4], psa[5], psa[6], psa[7]);
4491 return -1; /* failed to recover */
4492 }
4493
4494 /* device recovered */
4495 dev_info(&h->pdev->dev,
4496 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4497 psa[0], psa[1], psa[2], psa[3],
4498 psa[4], psa[5], psa[6], psa[7]);
4499
4500 return rc; /* success */
4501}
4502
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004503/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
4504 * tell which kind we're dealing with, so we send the abort both ways. There
4505 * shouldn't be any collisions between swizzled and unswizzled tags due to the
4506 * way we construct our tags but we check anyway in case the assumptions which
4507 * make this true someday become false.
4508 */
4509static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4510 unsigned char *scsi3addr, struct CommandList *abort)
4511{
4512 u8 swizzled_tag[8];
4513 struct CommandList *c;
4514 int rc = 0, rc2 = 0;
4515
Scott Teel54b6e9e2014-02-18 13:56:45 -06004516	/* ioaccel mode 2 commands should be aborted via the
4517 * accelerated path, since RAID path is unaware of these commands,
4518 * but underlying firmware can't handle abort TMF.
4519 * Change abort to physical device reset.
4520 */
4521 if (abort->cmd_type == CMD_IOACCEL2)
4522 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4523
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004524 /* we do not expect to find the swizzled tag in our queue, but
4525 * check anyway just to be sure the assumptions which make this
4526 * the case haven't become wrong.
4527 */
4528 memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
4529 swizzle_abort_tag(swizzled_tag);
4530 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
4531 if (c != NULL) {
4532 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
4533 return hpsa_send_abort(h, scsi3addr, abort, 0);
4534 }
4535 rc = hpsa_send_abort(h, scsi3addr, abort, 0);
4536
4537 /* if the command is still in our queue, we can't conclude that it was
4538 * aborted (it might have just completed normally) but in any case
4539 * we don't need to try to abort it another way.
4540 */
4541 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
4542 if (c)
4543 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
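	/* Report failure only if both the unswizzled and the swizzled
	 * abort attempts failed.
	 */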
4544 return rc && rc2;
4545}
4546
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004547/* Send an abort for the specified command.
4548 * If the device and controller support it,
4549 * send a task abort request.
4550 */
4551static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4552{
4553
4554 int i, rc;
4555 struct ctlr_info *h;
4556 struct hpsa_scsi_dev_t *dev;
4557 struct CommandList *abort; /* pointer to command to be aborted */
4558 struct CommandList *found;
4559 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
4560 char msg[256]; /* For debug messaging. */
4561 int ml = 0;
Scott Teel17eb87d2014-02-18 13:55:28 -06004562 u32 tagupper, taglower;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004563
4564 /* Find the controller of the command to be aborted */
4565 h = sdev_to_hba(sc->device);
4566 if (WARN(h == NULL,
4567 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4568 return FAILED;
4569
4570 /* Check that controller supports some kind of task abort */
4571 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4572 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4573 return FAILED;
4574
4575 memset(msg, 0, sizeof(msg));
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02004576 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004577 h->scsi_host->host_no, sc->device->channel,
4578 sc->device->id, sc->device->lun);
4579
4580 /* Find the device of the command to be aborted */
4581 dev = sc->device->hostdata;
4582 if (!dev) {
4583 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4584 msg);
4585 return FAILED;
4586 }
4587
4588 /* Get SCSI command to be aborted */
4589 abort = (struct CommandList *) sc->host_scribble;
4590 if (abort == NULL) {
4591 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
4592 msg);
4593 return FAILED;
4594 }
Scott Teel17eb87d2014-02-18 13:55:28 -06004595 hpsa_get_tag(h, abort, &taglower, &tagupper);
4596 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004597 as = (struct scsi_cmnd *) abort->scsi_cmd;
4598 if (as != NULL)
4599 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4600 as->cmnd[0], as->serial_number);
4601 dev_dbg(&h->pdev->dev, "%s\n", msg);
4602 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4603 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4604
4605 /* Search reqQ to See if command is queued but not submitted,
4606 * if so, complete the command with aborted status and remove
4607 * it from the reqQ.
4608 */
4609 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
4610 if (found) {
4611 found->err_info->CommandStatus = CMD_ABORTED;
4612 finish_cmd(found);
4613 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
4614 msg);
4615 return SUCCESS;
4616 }
4617
4618 /* not in reqQ, if also not in cmpQ, must have already completed */
4619 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4620 if (!found) {
Stephen M. Camerond6ebd0f2012-07-26 11:34:17 -05004621 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004622 msg);
4623 return SUCCESS;
4624 }
4625
4626 /*
4627 * Command is in flight, or possibly already completed
4628 * by the firmware (but not to the scsi mid layer) but we can't
4629 * distinguish which. Send the abort down.
4630 */
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004631 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004632 if (rc != 0) {
4633 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4634 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4635 h->scsi_host->host_no,
4636 dev->bus, dev->target, dev->lun);
4637 return FAILED;
4638 }
4639 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4640
4641 /* If the abort(s) above completed and actually aborted the
4642 * command, then the command to be aborted should already be
4643 * completed. If not, wait around a bit more to see if they
4644 * manage to complete normally.
4645 */
4646#define ABORT_COMPLETE_WAIT_SECS 30
4647 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4648 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4649 if (!found)
4650 return SUCCESS;
4651 msleep(100);
4652 }
4653 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4654 msg, ABORT_COMPLETE_WAIT_SECS);
4655 return FAILED;
4656}
4657
4658
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004659/*
4660 * For operations that cannot sleep, a command block is allocated at init,
4661 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4662 * which ones are free or in use. This routine acquires h->lock itself,
4663 * so the caller need not hold it. cmd_free() is the complement.
4664 */
4665static struct CommandList *cmd_alloc(struct ctlr_info *h)
4666{
4667 struct CommandList *c;
4668 int i;
4670 dma_addr_t cmd_dma_handle, err_dma_handle;
Matt Gatese16a33a2012-05-01 11:43:11 -05004671 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004672
Matt Gatese16a33a2012-05-01 11:43:11 -05004673 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004674 do {
4675 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
Matt Gatese16a33a2012-05-01 11:43:11 -05004676 if (i == h->nr_cmds) {
4677 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004678 return NULL;
Matt Gatese16a33a2012-05-01 11:43:11 -05004679 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004680 } while (test_and_set_bit
4681 (i & (BITS_PER_LONG - 1),
4682 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
Matt Gatese16a33a2012-05-01 11:43:11 -05004683 spin_unlock_irqrestore(&h->lock, flags);
4684
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004685 c = h->cmd_pool + i;
4686 memset(c, 0, sizeof(*c));
4687 cmd_dma_handle = h->cmd_pool_dhandle
4688 + i * sizeof(*c);
4689 c->err_info = h->errinfo_pool + i;
4690 memset(c->err_info, 0, sizeof(*c->err_info));
4691 err_dma_handle = h->errinfo_pool_dhandle
4692 + i * sizeof(*c->err_info);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004693
4694 c->cmdindex = i;
4695
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06004696 INIT_LIST_HEAD(&c->list);
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06004697 c->busaddr = (u32) cmd_dma_handle;
4698 temp64.val = (u64) err_dma_handle;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004699 c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
4700 c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004701
4702 c->h = h;
4703 return c;
4704}
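/*
 * Bitmap arithmetic used above, by example: with BITS_PER_LONG == 64,
 * command slot i == 130 is tracked by bit (130 & 63) == 2 of word
 * (130 / 64) == 2 of h->cmd_pool_bits, and the command block itself is
 * h->cmd_pool + 130.
 */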
4705
4706/* For operations that can wait for kmalloc to possibly sleep,
4707 * this routine can be called. Lock need not be held to call
4708 * cmd_special_alloc. cmd_special_free() is the complement.
4709 */
4710static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
4711{
4712 struct CommandList *c;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004713 dma_addr_t cmd_dma_handle, err_dma_handle;
4714
Joe Perches7c845eb2014-08-08 14:24:46 -07004715 c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004716 if (c == NULL)
4717 return NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004718
Matt Gatese1f7de02014-02-18 13:55:17 -06004719 c->cmd_type = CMD_SCSI;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004720 c->cmdindex = -1;
4721
Joe Perches7c845eb2014-08-08 14:24:46 -07004722 c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info),
4723 &err_dma_handle);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004724
4725 if (c->err_info == NULL) {
4726 pci_free_consistent(h->pdev,
4727 sizeof(*c), c, cmd_dma_handle);
4728 return NULL;
4729 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004730
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06004731 INIT_LIST_HEAD(&c->list);
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06004732 c->busaddr = (u32) cmd_dma_handle;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004733 c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
4734 c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004735
4736 c->h = h;
4737 return c;
4738}
4739
4740static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4741{
4742 int i;
Matt Gatese16a33a2012-05-01 11:43:11 -05004743 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004744
4745 i = c - h->cmd_pool;
Matt Gatese16a33a2012-05-01 11:43:11 -05004746 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004747 clear_bit(i & (BITS_PER_LONG - 1),
4748 h->cmd_pool_bits + (i / BITS_PER_LONG));
Matt Gatese16a33a2012-05-01 11:43:11 -05004749 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004750}
4751
4752static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
4753{
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004754 pci_free_consistent(h->pdev, sizeof(*c->err_info),
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004755 c->err_info,
4756 (dma_addr_t) le64_to_cpu(c->ErrDesc.Addr));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004757 pci_free_consistent(h->pdev, sizeof(*c),
Stephen M. Camerond896f3f2011-01-06 14:47:53 -06004758 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004759}
4760
4761#ifdef CONFIG_COMPAT
4762
Don Brace42a91642014-11-14 17:26:27 -06004763static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
4764 void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004765{
4766 IOCTL32_Command_struct __user *arg32 =
4767 (IOCTL32_Command_struct __user *) arg;
4768 IOCTL_Command_struct arg64;
4769 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4770 int err;
4771 u32 cp;
4772
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06004773 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004774 err = 0;
4775 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4776 sizeof(arg64.LUN_info));
4777 err |= copy_from_user(&arg64.Request, &arg32->Request,
4778 sizeof(arg64.Request));
4779 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4780 sizeof(arg64.error_info));
4781 err |= get_user(arg64.buf_size, &arg32->buf_size);
4782 err |= get_user(cp, &arg32->buf);
4783 arg64.buf = compat_ptr(cp);
4784 err |= copy_to_user(p, &arg64, sizeof(arg64));
4785
4786 if (err)
4787 return -EFAULT;
4788
Don Brace42a91642014-11-14 17:26:27 -06004789 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004790 if (err)
4791 return err;
4792 err |= copy_in_user(&arg32->error_info, &p->error_info,
4793 sizeof(arg32->error_info));
4794 if (err)
4795 return -EFAULT;
4796 return err;
4797}
4798
4799static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
Don Brace42a91642014-11-14 17:26:27 -06004800 int cmd, void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004801{
4802 BIG_IOCTL32_Command_struct __user *arg32 =
4803 (BIG_IOCTL32_Command_struct __user *) arg;
4804 BIG_IOCTL_Command_struct arg64;
4805 BIG_IOCTL_Command_struct __user *p =
4806 compat_alloc_user_space(sizeof(arg64));
4807 int err;
4808 u32 cp;
4809
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06004810 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004811 err = 0;
4812 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4813 sizeof(arg64.LUN_info));
4814 err |= copy_from_user(&arg64.Request, &arg32->Request,
4815 sizeof(arg64.Request));
4816 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4817 sizeof(arg64.error_info));
4818 err |= get_user(arg64.buf_size, &arg32->buf_size);
4819 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4820 err |= get_user(cp, &arg32->buf);
4821 arg64.buf = compat_ptr(cp);
4822 err |= copy_to_user(p, &arg64, sizeof(arg64));
4823
4824 if (err)
4825 return -EFAULT;
4826
Don Brace42a91642014-11-14 17:26:27 -06004827 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004828 if (err)
4829 return err;
4830 err |= copy_in_user(&arg32->error_info, &p->error_info,
4831 sizeof(arg32->error_info));
4832 if (err)
4833 return -EFAULT;
4834 return err;
4835}
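/*
 * Note on the two helpers above: each repacks the 32-bit ioctl layout
 * into the native IOCTL_Command_struct / BIG_IOCTL_Command_struct in a
 * compat_alloc_user_space() area, re-dispatches through hpsa_ioctl(),
 * and copies error_info back so 32-bit callers see the result.
 */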
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06004836
Don Brace42a91642014-11-14 17:26:27 -06004837static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06004838{
4839 switch (cmd) {
4840 case CCISS_GETPCIINFO:
4841 case CCISS_GETINTINFO:
4842 case CCISS_SETINTINFO:
4843 case CCISS_GETNODENAME:
4844 case CCISS_SETNODENAME:
4845 case CCISS_GETHEARTBEAT:
4846 case CCISS_GETBUSTYPES:
4847 case CCISS_GETFIRMVER:
4848 case CCISS_GETDRIVVER:
4849 case CCISS_REVALIDVOLS:
4850 case CCISS_DEREGDISK:
4851 case CCISS_REGNEWDISK:
4852 case CCISS_REGNEWD:
4853 case CCISS_RESCANDISK:
4854 case CCISS_GETLUNINFO:
4855 return hpsa_ioctl(dev, cmd, arg);
4856
4857 case CCISS_PASSTHRU32:
4858 return hpsa_ioctl32_passthru(dev, cmd, arg);
4859 case CCISS_BIG_PASSTHRU32:
4860 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4861
4862 default:
4863 return -ENOIOCTLCMD;
4864 }
4865}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004866#endif
4867
4868static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4869{
4870 struct hpsa_pci_info pciinfo;
4871
4872 if (!argp)
4873 return -EINVAL;
4874 pciinfo.domain = pci_domain_nr(h->pdev->bus);
4875 pciinfo.bus = h->pdev->bus->number;
4876 pciinfo.dev_fn = h->pdev->devfn;
4877 pciinfo.board_id = h->board_id;
4878 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4879 return -EFAULT;
4880 return 0;
4881}
4882
4883static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4884{
4885 DriverVer_type DriverVer;
4886 unsigned char vmaj, vmin, vsubmin;
4887 int rc;
4888
4889 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4890 &vmaj, &vmin, &vsubmin);
4891 if (rc != 3) {
4892		dev_info(&h->pdev->dev, "driver version string '%s' "
4893			"unrecognized.\n", HPSA_DRIVER_VERSION);
4894 vmaj = 0;
4895 vmin = 0;
4896 vsubmin = 0;
4897 }
4898 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
4899 if (!argp)
4900 return -EINVAL;
4901 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4902 return -EFAULT;
4903 return 0;
4904}
4905
4906static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4907{
4908 IOCTL_Command_struct iocommand;
4909 struct CommandList *c;
4910 char *buff = NULL;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004911 u64 temp64;
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004912 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004913
4914 if (!argp)
4915 return -EINVAL;
4916 if (!capable(CAP_SYS_RAWIO))
4917 return -EPERM;
4918 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4919 return -EFAULT;
4920 if ((iocommand.buf_size < 1) &&
4921 (iocommand.Request.Type.Direction != XFER_NONE)) {
4922 return -EINVAL;
4923 }
4924 if (iocommand.buf_size > 0) {
4925 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4926 if (buff == NULL)
4927			return -ENOMEM;
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05004928 if (iocommand.Request.Type.Direction & XFER_WRITE) {
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004929 /* Copy the data into the buffer we created */
4930 if (copy_from_user(buff, iocommand.buf,
4931 iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004932 rc = -EFAULT;
4933 goto out_kfree;
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004934 }
4935 } else {
4936 memset(buff, 0, iocommand.buf_size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004937 }
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004938 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004939 c = cmd_special_alloc(h);
4940 if (c == NULL) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004941 rc = -ENOMEM;
4942 goto out_kfree;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004943 }
4944 /* Fill in the command type */
4945 c->cmd_type = CMD_IOCTL_PEND;
4946 /* Fill in Command Header */
4947 c->Header.ReplyQueue = 0; /* unused in simple mode */
4948 if (iocommand.buf_size > 0) { /* buffer to fill */
4949 c->Header.SGList = 1;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004950 c->Header.SGTotal = cpu_to_le16(1);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004951 } else { /* no buffers to fill */
4952 c->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004953 c->Header.SGTotal = cpu_to_le16(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004954 }
4955 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4956	/* use the bus address of the cmd block as the tag */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004957 c->Header.tag = c->busaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004958
4959 /* Fill in Request block */
4960 memcpy(&c->Request, &iocommand.Request,
4961 sizeof(c->Request));
4962
4963 /* Fill in the scatter gather information */
4964 if (iocommand.buf_size > 0) {
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004965 temp64 = pci_map_single(h->pdev, buff,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004966 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004967 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
4968 c->SG[0].Addr = cpu_to_le64(0);
4969 c->SG[0].Len = cpu_to_le32(0);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06004970 rc = -ENOMEM;
4971 goto out;
4972 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06004973 c->SG[0].Addr = cpu_to_le64(temp64);
4974 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
4975 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004976 }
Stephen M. Camerona0c12412011-10-26 16:22:04 -05004977 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
Stephen M. Cameronc2dd32e2011-06-03 09:57:29 -05004978 if (iocommand.buf_size > 0)
4979 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004980 check_ioctl_unit_attention(h, c);
4981
4982 /* Copy the error information out */
4983 memcpy(&iocommand.error_info, c->err_info,
4984 sizeof(iocommand.error_info));
4985 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004986 rc = -EFAULT;
4987 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004988 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05004989 if ((iocommand.Request.Type.Direction & XFER_READ) &&
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004990 iocommand.buf_size > 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004991 /* Copy the data out of the buffer we created */
4992 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004993 rc = -EFAULT;
4994 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004995 }
4996 }
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004997out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004998 cmd_special_free(h, c);
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004999out_kfree:
5000 kfree(buff);
5001 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005002}
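/*
 * Userspace usage sketch for the CCISS_PASSTHRU path above (not driver
 * code; the device node and buffer size are assumptions). The caller
 * fills in the LUN address, the request block, and a data buffer, then
 * issues the ioctl on the SCSI device node:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/cciss_ioctl.h>
 *
 *	unsigned char inq[96];
 *	IOCTL_Command_struct ic;
 *	int fd = open("/dev/sda", O_RDWR);	// hypothetical node
 *
 *	memset(&ic, 0, sizeof(ic));
 *	ic.LUN_info.LogDev.Mode = 1;		// logical volume addressing
 *	ic.LUN_info.LogDev.VolId = 0;		// first logical drive
 *	ic.Request.CDBLen = 6;
 *	ic.Request.Type.Type = TYPE_CMD;
 *	ic.Request.Type.Attribute = ATTR_SIMPLE;
 *	ic.Request.Type.Direction = XFER_READ;
 *	ic.Request.CDB[0] = 0x12;		// INQUIRY
 *	ic.Request.CDB[4] = sizeof(inq);	// allocation length
 *	ic.buf_size = sizeof(inq);
 *	ic.buf = inq;
 *	if (ioctl(fd, CCISS_PASSTHRU, &ic) < 0)
 *		perror("CCISS_PASSTHRU");
 */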
5003
5004static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5005{
5006 BIG_IOCTL_Command_struct *ioc;
5007 struct CommandList *c;
5008 unsigned char **buff = NULL;
5009 int *buff_size = NULL;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005010 u64 temp64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005011 BYTE sg_used = 0;
5012 int status = 0;
5013 int i;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06005014 u32 left;
5015 u32 sz;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005016 BYTE __user *data_ptr;
5017
5018 if (!argp)
5019 return -EINVAL;
5020 if (!capable(CAP_SYS_RAWIO))
5021 return -EPERM;
5022	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
5024 if (!ioc) {
5025 status = -ENOMEM;
5026 goto cleanup1;
5027 }
5028 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5029 status = -EFAULT;
5030 goto cleanup1;
5031 }
5032 if ((ioc->buf_size < 1) &&
5033 (ioc->Request.Type.Direction != XFER_NONE)) {
5034 status = -EINVAL;
5035 goto cleanup1;
5036 }
5037 /* Check kmalloc limits using all SGs */
5038 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5039 status = -EINVAL;
5040 goto cleanup1;
5041 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06005042 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005043 status = -EINVAL;
5044 goto cleanup1;
5045 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06005046 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005047 if (!buff) {
5048 status = -ENOMEM;
5049 goto cleanup1;
5050 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06005051 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005052 if (!buff_size) {
5053 status = -ENOMEM;
5054 goto cleanup1;
5055 }
5056 left = ioc->buf_size;
5057 data_ptr = ioc->buf;
5058 while (left) {
5059 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5060 buff_size[sg_used] = sz;
5061 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5062 if (buff[sg_used] == NULL) {
5063 status = -ENOMEM;
5064 goto cleanup1;
5065 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05005066 if (ioc->Request.Type.Direction & XFER_WRITE) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005067 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
Stephen M. Cameron0758f4f2014-07-03 10:18:03 -05005068 status = -EFAULT;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005069 goto cleanup1;
5070 }
5071 } else
5072 memset(buff[sg_used], 0, sz);
5073 left -= sz;
5074 data_ptr += sz;
5075 sg_used++;
5076 }
5077 c = cmd_special_alloc(h);
5078 if (c == NULL) {
5079 status = -ENOMEM;
5080 goto cleanup1;
5081 }
5082 c->cmd_type = CMD_IOCTL_PEND;
5083 c->Header.ReplyQueue = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005084 c->Header.SGList = (u8) sg_used;
5085 c->Header.SGTotal = cpu_to_le16(sg_used);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005086 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005087 c->Header.tag = c->busaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005088 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5089 if (ioc->buf_size > 0) {
5091 for (i = 0; i < sg_used; i++) {
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005092 temp64 = pci_map_single(h->pdev, buff[i],
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005093 buff_size[i], PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005094 if (dma_mapping_error(&h->pdev->dev,
5095 (dma_addr_t) temp64)) {
5096 c->SG[i].Addr = cpu_to_le64(0);
5097 c->SG[i].Len = cpu_to_le32(0);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06005098 hpsa_pci_unmap(h->pdev, c, i,
5099 PCI_DMA_BIDIRECTIONAL);
5100 status = -ENOMEM;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005101 goto cleanup0;
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06005102 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005103 c->SG[i].Addr = cpu_to_le64(temp64);
5104 c->SG[i].Len = cpu_to_le32(buff_size[i]);
5105 c->SG[i].Ext = cpu_to_le32(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005106 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005107 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005108 }
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005109 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005110 if (sg_used)
5111 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005112 check_ioctl_unit_attention(h, c);
5113 /* Copy the error information out */
5114 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5115 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005116 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005117 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005118 }
Stephen M. Cameron9233fb12014-05-29 10:52:41 -05005119 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005120 /* Copy the data out of the buffer we created */
5121 BYTE __user *ptr = ioc->buf;
5122 for (i = 0; i < sg_used; i++) {
5123 if (copy_to_user(ptr, buff[i], buff_size[i])) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005124 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005125 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005126 }
5127 ptr += buff_size[i];
5128 }
5129 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005130 status = 0;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005131cleanup0:
5132 cmd_special_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005133cleanup1:
5134 if (buff) {
5135 for (i = 0; i < sg_used; i++)
5136 kfree(buff[i]);
5137 kfree(buff);
5138 }
5139 kfree(buff_size);
5140 kfree(ioc);
5141 return status;
5142}
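/*
 * Chunking example for the routine above: a caller passing
 * buf_size = 256 KiB with malloc_size = 64 KiB is serviced with
 * sg_used = 4 kernel buffers of 64 KiB each, one SG descriptor per
 * chunk, subject to the SG_ENTRIES_IN_CMD and MAX_KMALLOC_SIZE limits
 * checked at the top.
 */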
5143
5144static void check_ioctl_unit_attention(struct ctlr_info *h,
5145 struct CommandList *c)
5146{
5147 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5148 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5149 (void) check_for_unit_attention(h, c);
5150}
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005151
5152static int increment_passthru_count(struct ctlr_info *h)
5153{
5154 unsigned long flags;
5155
5156 spin_lock_irqsave(&h->passthru_count_lock, flags);
5157 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
5158 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5159 return -1;
5160 }
5161 h->passthru_count++;
5162 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5163 return 0;
5164}
5165
5166static void decrement_passthru_count(struct ctlr_info *h)
5167{
5168 unsigned long flags;
5169
5170 spin_lock_irqsave(&h->passthru_count_lock, flags);
5171 if (h->passthru_count <= 0) {
5172 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5173 /* not expecting to get here. */
5174 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
5175 return;
5176 }
5177 h->passthru_count--;
5178 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5179}
5180
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005181/*
5182 * ioctl
5183 */
Don Brace42a91642014-11-14 17:26:27 -06005184static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005185{
5186 struct ctlr_info *h;
5187 void __user *argp = (void __user *)arg;
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005188 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005189
5190 h = sdev_to_hba(dev);
5191
5192 switch (cmd) {
5193 case CCISS_DEREGDISK:
5194 case CCISS_REGNEWDISK:
5195 case CCISS_REGNEWD:
Stephen M. Camerona08a8472010-02-04 08:43:16 -06005196 hpsa_scan_start(h->scsi_host);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005197 return 0;
5198 case CCISS_GETPCIINFO:
5199 return hpsa_getpciinfo_ioctl(h, argp);
5200 case CCISS_GETDRIVVER:
5201 return hpsa_getdrivver_ioctl(h, argp);
5202 case CCISS_PASSTHRU:
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005203 if (increment_passthru_count(h))
5204 return -EAGAIN;
5205 rc = hpsa_passthru_ioctl(h, argp);
5206 decrement_passthru_count(h);
5207 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005208 case CCISS_BIG_PASSTHRU:
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005209 if (increment_passthru_count(h))
5210 return -EAGAIN;
5211 rc = hpsa_big_passthru_ioctl(h, argp);
5212 decrement_passthru_count(h);
5213 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005214 default:
5215 return -ENOTTY;
5216 }
5217}
5218
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005219static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5220 u8 reset_type)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005221{
5222 struct CommandList *c;
5223
5224 c = cmd_alloc(h);
5225 if (!c)
5226 return -ENOMEM;
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005227 /* fill_cmd can't fail here, no data buffer to map */
5228 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005229 RAID_CTLR_LUNID, TYPE_MSG);
5230 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5231 c->waiting = NULL;
5232 enqueue_cmd_and_start_io(h, c);
5233 /* Don't wait for completion, the reset won't complete. Don't free
5234 * the command either. This is the last command we will send before
5235 * re-initializing everything, so it doesn't matter and won't leak.
5236 */
5237 return 0;
5238}
5239
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005240static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06005241 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005242 int cmd_type)
5243{
5244 int pci_dir = XFER_NONE;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005245 struct CommandList *a; /* for commands to be aborted */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005246 u32 tupper, tlower;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005247
5248 c->cmd_type = CMD_IOCTL_PEND;
5249 c->Header.ReplyQueue = 0;
5250 if (buff != NULL && size > 0) {
5251 c->Header.SGList = 1;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005252 c->Header.SGTotal = cpu_to_le16(1);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005253 } else {
5254 c->Header.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005255 c->Header.SGTotal = cpu_to_le16(0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005256 }
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005257 c->Header.tag = c->busaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005258 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5259
5260 c->Request.Type.Type = cmd_type;
5261 if (cmd_type == TYPE_CMD) {
5262 switch (cmd) {
5263 case HPSA_INQUIRY:
5264 /* are we trying to read a vital product page */
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06005265 if (page_code & VPD_PAGE) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005266 c->Request.CDB[1] = 0x01;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06005267 c->Request.CDB[2] = (page_code & 0xff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005268 }
5269 c->Request.CDBLen = 6;
5270 c->Request.Type.Attribute = ATTR_SIMPLE;
5271 c->Request.Type.Direction = XFER_READ;
5272 c->Request.Timeout = 0;
5273 c->Request.CDB[0] = HPSA_INQUIRY;
5274 c->Request.CDB[4] = size & 0xFF;
5275 break;
5276 case HPSA_REPORT_LOG:
5277 case HPSA_REPORT_PHYS:
5278			/* Talking to the controller, so it's a physical command:
5279			 * mode = 00, target = 0. Nothing to write.
5280 */
5281 c->Request.CDBLen = 12;
5282 c->Request.Type.Attribute = ATTR_SIMPLE;
5283 c->Request.Type.Direction = XFER_READ;
5284 c->Request.Timeout = 0;
5285 c->Request.CDB[0] = cmd;
5286 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5287 c->Request.CDB[7] = (size >> 16) & 0xFF;
5288 c->Request.CDB[8] = (size >> 8) & 0xFF;
5289 c->Request.CDB[9] = size & 0xFF;
5290 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005291 case HPSA_CACHE_FLUSH:
5292 c->Request.CDBLen = 12;
5293 c->Request.Type.Attribute = ATTR_SIMPLE;
5294 c->Request.Type.Direction = XFER_WRITE;
5295 c->Request.Timeout = 0;
5296 c->Request.CDB[0] = BMIC_WRITE;
5297 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
Stephen M. Cameronbb158ea2011-10-26 16:21:17 -05005298 c->Request.CDB[7] = (size >> 8) & 0xFF;
5299 c->Request.CDB[8] = size & 0xFF;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005300 break;
5301 case TEST_UNIT_READY:
5302 c->Request.CDBLen = 6;
5303 c->Request.Type.Attribute = ATTR_SIMPLE;
5304 c->Request.Type.Direction = XFER_NONE;
5305 c->Request.Timeout = 0;
5306 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005307 case HPSA_GET_RAID_MAP:
5308 c->Request.CDBLen = 12;
5309 c->Request.Type.Attribute = ATTR_SIMPLE;
5310 c->Request.Type.Direction = XFER_READ;
5311 c->Request.Timeout = 0;
5312 c->Request.CDB[0] = HPSA_CISS_READ;
5313 c->Request.CDB[1] = cmd;
5314 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5315 c->Request.CDB[7] = (size >> 16) & 0xFF;
5316 c->Request.CDB[8] = (size >> 8) & 0xFF;
5317 c->Request.CDB[9] = size & 0xFF;
5318 break;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06005319 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5320 c->Request.CDBLen = 10;
5321 c->Request.Type.Attribute = ATTR_SIMPLE;
5322 c->Request.Type.Direction = XFER_READ;
5323 c->Request.Timeout = 0;
5324 c->Request.CDB[0] = BMIC_READ;
5325 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5326 c->Request.CDB[7] = (size >> 16) & 0xFF;
5327 c->Request.CDB[8] = (size >> 8) & 0xFF;
5328 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005329 default:
5330			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5331 BUG();
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005332 return -1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005333 }
5334 } else if (cmd_type == TYPE_MSG) {
5335 switch (cmd) {
5336
5337 case HPSA_DEVICE_RESET_MSG:
5338 c->Request.CDBLen = 16;
5339 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
5340 c->Request.Type.Attribute = ATTR_SIMPLE;
5341 c->Request.Type.Direction = XFER_NONE;
5342 c->Request.Timeout = 0; /* Don't time out */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005343 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5344 c->Request.CDB[0] = cmd;
Stephen M. Cameron21e89af2012-07-26 11:34:10 -05005345 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005346 /* If bytes 4-7 are zero, it means reset the */
5347 /* LunID device */
5348 c->Request.CDB[4] = 0x00;
5349 c->Request.CDB[5] = 0x00;
5350 c->Request.CDB[6] = 0x00;
5351 c->Request.CDB[7] = 0x00;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005352 break;
5353 case HPSA_ABORT_MSG:
5354 a = buff; /* point to command to be aborted */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005355			dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx\n",
5356				a->Header.tag, c->Header.tag);
5357			tlower = (u32) (a->Header.tag & 0x0ffffffffULL);
5358			tupper = (u32) (a->Header.tag >> 32);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005359 c->Request.CDBLen = 16;
5360 c->Request.Type.Type = TYPE_MSG;
5361 c->Request.Type.Attribute = ATTR_SIMPLE;
5362 c->Request.Type.Direction = XFER_WRITE;
5363 c->Request.Timeout = 0; /* Don't time out */
5364 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5365 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5366 c->Request.CDB[2] = 0x00; /* reserved */
5367 c->Request.CDB[3] = 0x00; /* reserved */
5368 /* Tag to abort goes in CDB[4]-CDB[11] */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005369 c->Request.CDB[4] = tlower & 0xFF;
5370 c->Request.CDB[5] = (tlower >> 8) & 0xFF;
5371 c->Request.CDB[6] = (tlower >> 16) & 0xFF;
5372 c->Request.CDB[7] = (tlower >> 24) & 0xFF;
5373 c->Request.CDB[8] = tupper & 0xFF;
5374 c->Request.CDB[9] = (tupper >> 8) & 0xFF;
5375 c->Request.CDB[10] = (tupper >> 16) & 0xFF;
5376 c->Request.CDB[11] = (tupper >> 24) & 0xFF;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005377 c->Request.CDB[12] = 0x00; /* reserved */
5378 c->Request.CDB[13] = 0x00; /* reserved */
5379 c->Request.CDB[14] = 0x00; /* reserved */
5380 c->Request.CDB[15] = 0x00; /* reserved */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005381 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005382 default:
5383 dev_warn(&h->pdev->dev, "unknown message type %d\n",
5384 cmd);
5385 BUG();
5386 }
5387 } else {
5388 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5389 BUG();
5390 }
5391
5392 switch (c->Request.Type.Direction) {
5393 case XFER_READ:
5394 pci_dir = PCI_DMA_FROMDEVICE;
5395 break;
5396 case XFER_WRITE:
5397 pci_dir = PCI_DMA_TODEVICE;
5398 break;
5399 case XFER_NONE:
5400 pci_dir = PCI_DMA_NONE;
5401 break;
5402 default:
5403 pci_dir = PCI_DMA_BIDIRECTIONAL;
5404 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005405 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5406 return -1;
5407 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005408}
5409
5410/*
5411 * Map (physical) PCI mem into (virtual) kernel space
5412 */
5413static void __iomem *remap_pci_mem(ulong base, ulong size)
5414{
5415 ulong page_base = ((ulong) base) & PAGE_MASK;
5416 ulong page_offs = ((ulong) base) - page_base;
Stephen M. Cameron088ba342012-07-26 11:34:23 -05005417 void __iomem *page_remapped = ioremap_nocache(page_base,
5418 page_offs + size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005419
5420 return page_remapped ? (page_remapped + page_offs) : NULL;
5421}
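/*
 * Example of the alignment math above, assuming 4 KiB pages: a BAR
 * address of 0xfebf1234 gives page_base = 0xfebf1000 and
 * page_offs = 0x234, so ioremap_nocache() maps page_offs + size bytes
 * and the caller gets back a pointer 0x234 bytes into the mapping.
 */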
5422
5423/* Takes cmds off the submission queue and sends them to the hardware,
5424 * then puts them on the queue of cmds waiting for completion.
Stephen M. Cameron0b570752014-05-29 10:53:28 -05005425 * Assumes h->lock is held
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005426 */
Stephen M. Cameron0b570752014-05-29 10:53:28 -05005427static void start_io(struct ctlr_info *h, unsigned long *flags)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005428{
5429 struct CommandList *c;
5430
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06005431 while (!list_empty(&h->reqQ)) {
5432 c = list_entry(h->reqQ.next, struct CommandList, list);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005433 /* can't do anything if fifo is full */
5434 if ((h->access.fifo_full(h))) {
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005435 h->fifo_recently_full = 1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005436 dev_warn(&h->pdev->dev, "fifo full\n");
5437 break;
5438 }
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005439 h->fifo_recently_full = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005440
5441 /* Get the first entry from the Request Q */
5442 removeQ(c);
5443 h->Qdepth--;
5444
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005445 /* Put job onto the completed Q */
5446 addQ(&h->cmpQ, c);
Matt Gatese16a33a2012-05-01 11:43:11 -05005447
5448 /* Must increment commands_outstanding before unlocking
 5449 * and submitting, to avoid a race when checking for the
 5450 * fifo-full condition.
5451 */
5452 h->commands_outstanding++;
Matt Gatese16a33a2012-05-01 11:43:11 -05005453
5454 /* Tell the controller execute command */
Stephen M. Cameron0b570752014-05-29 10:53:28 -05005455 spin_unlock_irqrestore(&h->lock, *flags);
Matt Gatese16a33a2012-05-01 11:43:11 -05005456 h->access.submit_command(h, c);
Stephen M. Cameron0b570752014-05-29 10:53:28 -05005457 spin_lock_irqsave(&h->lock, *flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005458 }
Stephen M. Cameron0b570752014-05-29 10:53:28 -05005459}
5460
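/*
 * Convenience wrapper for callers that do not already hold h->lock.
 * Note that start_io() itself drops and re-acquires the lock around
 * h->access.submit_command() so the doorbell write to the hardware is
 * not done under the spinlock.
 */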
5461static void lock_and_start_io(struct ctlr_info *h)
5462{
5463 unsigned long flags;
5464
5465 spin_lock_irqsave(&h->lock, flags);
5466 start_io(h, &flags);
Matt Gatese16a33a2012-05-01 11:43:11 -05005467 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005468}
5469
Matt Gates254f7962012-05-01 11:43:06 -05005470static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005471{
Matt Gates254f7962012-05-01 11:43:06 -05005472 return h->access.command_completed(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005473}
5474
Stephen M. Cameron900c5442010-02-04 08:42:35 -06005475static inline bool interrupt_pending(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005476{
5477 return h->access.intr_pending(h);
5478}
5479
5480static inline long interrupt_not_for_us(struct ctlr_info *h)
5481{
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005482 return (h->access.intr_pending(h) == 0) ||
5483 (h->interrupts_enabled == 0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005484}
5485
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06005486static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5487 u32 raw_tag)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005488{
5489 if (unlikely(tag_index >= h->nr_cmds)) {
5490 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5491 return 1;
5492 }
5493 return 0;
5494}
5495
Stephen M. Cameron5a3d16f2012-05-01 11:42:46 -05005496static inline void finish_cmd(struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005497{
Matt Gatese16a33a2012-05-01 11:43:11 -05005498 unsigned long flags;
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005499 int io_may_be_stalled = 0;
5500 struct ctlr_info *h = c->h;
Matt Gatese16a33a2012-05-01 11:43:11 -05005501
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005502 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005503 removeQ(c);
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005504
5505 /*
5506 * Check for possibly stalled i/o.
5507 *
5508 * If a fifo_full condition is encountered, requests will back up
5509 * in h->reqQ. This queue is only emptied out by start_io which is
5510 * only called when a new i/o request comes in. If no i/o's are
5511 * forthcoming, the i/o's in h->reqQ can get stuck. So we call
5512 * start_io from here if we detect such a danger.
5513 *
5514 * Normally, we shouldn't hit this case, but pounding on the
5515 * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if
5516 * commands_outstanding is low. We want to avoid calling
 5517 * start_io from in here as much as possible, and especially don't
5518 * want to get in a cycle where we call start_io every time
5519 * through here.
5520 */
5521 if (unlikely(h->fifo_recently_full) &&
5522 h->commands_outstanding < 5)
5523 io_may_be_stalled = 1;
5524
5525 spin_unlock_irqrestore(&h->lock, flags);
5526
Stephen M. Camerone85c5972012-05-01 11:43:42 -05005527 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
Scott Teelc3497752014-02-18 13:56:34 -06005528 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5529 || c->cmd_type == CMD_IOACCEL2))
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05005530 complete_scsi_command(c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005531 else if (c->cmd_type == CMD_IOCTL_PEND)
5532 complete(c->waiting);
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005533 if (unlikely(io_may_be_stalled))
Stephen M. Cameron0b570752014-05-29 10:53:28 -05005534 lock_and_start_io(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005535}
5536
Stephen M. Camerona104c992010-02-04 08:42:24 -06005537static inline u32 hpsa_tag_contains_index(u32 tag)
5538{
Stephen M. Camerona104c992010-02-04 08:42:24 -06005539 return tag & DIRECT_LOOKUP_BIT;
5540}
5541
5542static inline u32 hpsa_tag_to_index(u32 tag)
5543{
Stephen M. Camerona104c992010-02-04 08:42:24 -06005544 return tag >> DIRECT_LOOKUP_SHIFT;
5545}
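/*
 * Completion tags come in two flavors: "direct lookup" tags (flagged by
 * DIRECT_LOOKUP_BIT) carry the command's index into h->cmd_pool in their
 * upper bits, so the command is found without searching; anything else
 * has to be matched against the completion queue by bus address (see
 * process_nonindexed_cmd() below).
 */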
5546
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005547
5548static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
Stephen M. Camerona104c992010-02-04 08:42:24 -06005549{
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005550#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5551#define HPSA_SIMPLE_ERROR_BITS 0x03
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06005552 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005553 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5554 return tag & ~HPSA_PERF_ERROR_BITS;
Stephen M. Camerona104c992010-02-04 08:42:24 -06005555}
5556
Don Brace303932f2010-02-04 08:42:40 -06005557/* process completion of an indexed ("direct lookup") command */
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005558static inline void process_indexed_cmd(struct ctlr_info *h,
Don Brace303932f2010-02-04 08:42:40 -06005559 u32 raw_tag)
5560{
5561 u32 tag_index;
5562 struct CommandList *c;
5563
5564 tag_index = hpsa_tag_to_index(raw_tag);
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005565 if (!bad_tag(h, tag_index, raw_tag)) {
5566 c = h->cmd_pool + tag_index;
5567 finish_cmd(c);
5568 }
Don Brace303932f2010-02-04 08:42:40 -06005569}
5570
5571/* process completion of a non-indexed command */
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005572static inline void process_nonindexed_cmd(struct ctlr_info *h,
Don Brace303932f2010-02-04 08:42:40 -06005573 u32 raw_tag)
5574{
5575 u32 tag;
5576 struct CommandList *c = NULL;
Matt Gatese16a33a2012-05-01 11:43:11 -05005577 unsigned long flags;
Don Brace303932f2010-02-04 08:42:40 -06005578
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005579 tag = hpsa_tag_discard_error_bits(h, raw_tag);
Matt Gatese16a33a2012-05-01 11:43:11 -05005580 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06005581 list_for_each_entry(c, &h->cmpQ, list) {
Don Brace303932f2010-02-04 08:42:40 -06005582 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
Matt Gatese16a33a2012-05-01 11:43:11 -05005583 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameron5a3d16f2012-05-01 11:42:46 -05005584 finish_cmd(c);
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005585 return;
Don Brace303932f2010-02-04 08:42:40 -06005586 }
5587 }
Matt Gatese16a33a2012-05-01 11:43:11 -05005588 spin_unlock_irqrestore(&h->lock, flags);
Don Brace303932f2010-02-04 08:42:40 -06005589 bad_tag(h, h->nr_cmds + 1, raw_tag);
Don Brace303932f2010-02-04 08:42:40 -06005590}
5591
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005592/* Some controllers, like p400, will give us one interrupt
5593 * after a soft reset, even if we turned interrupts off.
5594 * Only need to check for this in the hpsa_xxx_discard_completions
5595 * functions.
5596 */
5597static int ignore_bogus_interrupt(struct ctlr_info *h)
5598{
5599 if (likely(!reset_devices))
5600 return 0;
5601
5602 if (likely(h->interrupts_enabled))
5603 return 0;
5604
5605 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5606 "(known firmware bug.) Ignoring.\n");
5607
5608 return 1;
5609}
5610
Matt Gates254f7962012-05-01 11:43:06 -05005611/*
5612 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 5613 * Relies on (h->q[x] == x) being true for x such that
5614 * 0 <= x < MAX_REPLY_QUEUES.
5615 */
5616static struct ctlr_info *queue_to_hba(u8 *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005617{
Matt Gates254f7962012-05-01 11:43:06 -05005618 return container_of((queue - *queue), struct ctlr_info, q[0]);
5619}
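/*
 * Worked example: hpsa_request_irq() initializes h->q[i] = i, so if a
 * handler is passed queue == &h->q[3], then *queue == 3 and, since q[]
 * is a u8 array, (queue - *queue) == &h->q[0]; container_of() then backs
 * up from q[0] to the enclosing struct ctlr_info.
 */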
5620
5621static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5622{
5623 struct ctlr_info *h = queue_to_hba(queue);
5624 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005625 u32 raw_tag;
5626
5627 if (ignore_bogus_interrupt(h))
5628 return IRQ_NONE;
5629
5630 if (interrupt_not_for_us(h))
5631 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005632 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005633 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05005634 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005635 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05005636 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005637 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005638 return IRQ_HANDLED;
5639}
5640
Matt Gates254f7962012-05-01 11:43:06 -05005641static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005642{
Matt Gates254f7962012-05-01 11:43:06 -05005643 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005644 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05005645 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005646
5647 if (ignore_bogus_interrupt(h))
5648 return IRQ_NONE;
5649
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005650 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05005651 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005652 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05005653 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005654 return IRQ_HANDLED;
5655}
5656
Matt Gates254f7962012-05-01 11:43:06 -05005657static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005658{
Matt Gates254f7962012-05-01 11:43:06 -05005659 struct ctlr_info *h = queue_to_hba((u8 *) queue);
Don Brace303932f2010-02-04 08:42:40 -06005660 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05005661 u8 q = *(u8 *) queue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005662
5663 if (interrupt_not_for_us(h))
5664 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005665 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005666 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05005667 raw_tag = get_next_completion(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005668 while (raw_tag != FIFO_EMPTY) {
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005669 if (likely(hpsa_tag_contains_index(raw_tag)))
5670 process_indexed_cmd(h, raw_tag);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005671 else
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005672 process_nonindexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05005673 raw_tag = next_command(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005674 }
5675 }
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005676 return IRQ_HANDLED;
5677}
5678
Matt Gates254f7962012-05-01 11:43:06 -05005679static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005680{
Matt Gates254f7962012-05-01 11:43:06 -05005681 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005682 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05005683 u8 q = *(u8 *) queue;
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005684
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005685 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05005686 raw_tag = get_next_completion(h, q);
Don Brace303932f2010-02-04 08:42:40 -06005687 while (raw_tag != FIFO_EMPTY) {
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005688 if (likely(hpsa_tag_contains_index(raw_tag)))
5689 process_indexed_cmd(h, raw_tag);
Don Brace303932f2010-02-04 08:42:40 -06005690 else
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005691 process_nonindexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05005692 raw_tag = next_command(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005693 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005694 return IRQ_HANDLED;
5695}
5696
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005697/* Send a message CDB to the firmware. Careful, this only works
5698 * in simple mode, not performant mode due to the tag lookup.
5699 * We only ever use this immediately after a controller reset.
5700 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005701static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5702 unsigned char type)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005703{
5704 struct Command {
5705 struct CommandListHeader CommandHeader;
5706 struct RequestBlock Request;
5707 struct ErrDescriptor ErrorDescriptor;
5708 };
5709 struct Command *cmd;
5710 static const size_t cmd_sz = sizeof(*cmd) +
5711 sizeof(cmd->ErrorDescriptor);
5712 dma_addr_t paddr64;
5713 uint32_t paddr32, tag;
5714 void __iomem *vaddr;
5715 int i, err;
5716
5717 vaddr = pci_ioremap_bar(pdev, 0);
5718 if (vaddr == NULL)
5719 return -ENOMEM;
5720
5721 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5722 * CCISS commands, so they must be allocated from the lower 4GiB of
5723 * memory.
5724 */
5725 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5726 if (err) {
5727 iounmap(vaddr);
5728 return -ENOMEM;
5729 }
5730
5731 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5732 if (cmd == NULL) {
5733 iounmap(vaddr);
5734 return -ENOMEM;
5735 }
5736
5737 /* This must fit, because of the 32-bit consistent DMA mask. Also,
5738 * although there's no guarantee, we assume that the address is at
5739 * least 4-byte aligned (most likely, it's page-aligned).
5740 */
5741 paddr32 = paddr64;
5742
5743 cmd->CommandHeader.ReplyQueue = 0;
5744 cmd->CommandHeader.SGList = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005745 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
5746 cmd->CommandHeader.tag = paddr32;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005747 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5748
5749 cmd->Request.CDBLen = 16;
5750 cmd->Request.Type.Type = TYPE_MSG;
5751 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
5752 cmd->Request.Type.Direction = XFER_NONE;
5753 cmd->Request.Timeout = 0; /* Don't time out */
5754 cmd->Request.CDB[0] = opcode;
5755 cmd->Request.CDB[1] = type;
5756 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06005757 cmd->ErrorDescriptor.Addr =
5758 cpu_to_le64((paddr32 + sizeof(*cmd)));
5759 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005760
5761 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
5762
5763 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5764 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005765 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005766 break;
5767 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5768 }
5769
5770 iounmap(vaddr);
5771
5772 /* we leak the DMA buffer here ... no choice since the controller could
5773 * still complete the command.
5774 */
5775 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5776 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5777 opcode, type);
5778 return -ETIMEDOUT;
5779 }
5780
5781 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5782
5783 if (tag & HPSA_ERROR_BIT) {
5784 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5785 opcode, type);
5786 return -EIO;
5787 }
5788
5789 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5790 opcode, type);
5791 return 0;
5792}
5793
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005794#define hpsa_noop(p) hpsa_message(p, 3, 0)
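/* Message opcode 3 is the controller no-op, used only to verify that the
 * board responds at all after a reset (see hpsa_init_reset_devices()).
 */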
5795
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005796static int hpsa_controller_hard_reset(struct pci_dev *pdev,
Don Brace42a91642014-11-14 17:26:27 -06005797 void __iomem *vaddr, u32 use_doorbell)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005798{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005799 u16 pmcsr;
5800 int pos;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005801
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005802 if (use_doorbell) {
5803 /* For everything after the P600, the PCI power state method
5804 * of resetting the controller doesn't work, so we have this
5805 * other way using the doorbell register.
5806 */
5807 dev_info(&pdev->dev, "using doorbell to reset controller\n");
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005808 writel(use_doorbell, vaddr + SA5_DOORBELL);
Stephen M. Cameron85009232013-09-23 13:33:36 -05005809
Justin Lindley00701a92014-05-29 10:52:47 -05005810 /* PMC hardware guys tell us we need a 10 second delay after
Stephen M. Cameron85009232013-09-23 13:33:36 -05005811 * doorbell reset and before any attempt to talk to the board
5812 * at all to ensure that this actually works and doesn't fall
5813 * over in some weird corner cases.
5814 */
Justin Lindley00701a92014-05-29 10:52:47 -05005815 msleep(10000);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005816 } else { /* Try to do it the PCI power state way */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005817
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005818 /* Quoting from the Open CISS Specification: "The Power
5819 * Management Control/Status Register (CSR) controls the power
5820 * state of the device. The normal operating state is D0,
5821 * CSR=00h. The software off state is D3, CSR=03h. To reset
5822 * the controller, place the interface device in D3 then to D0,
5823 * this causes a secondary PCI reset which will reset the
5824 * controller." */
5825
5826 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
5827 if (pos == 0) {
5828 dev_err(&pdev->dev,
5829 "hpsa_reset_controller: "
5830 "PCI PM not supported\n");
5831 return -ENODEV;
5832 }
5833 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5834 /* enter the D3hot power management state */
5835 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
5836 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5837 pmcsr |= PCI_D3hot;
5838 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
5839
5840 msleep(500);
5841
5842 /* enter the D0 power management state */
5843 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5844 pmcsr |= PCI_D0;
5845 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
Mike Millerc4853ef2011-10-21 08:19:43 +02005846
5847 /*
5848 * The P600 requires a small delay when changing states.
5849 * Otherwise we may think the board did not reset and we bail.
 5850 * This is for kdump only and is particular to the P600.
5851 */
5852 msleep(500);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005853 }
5854 return 0;
5855}
5856
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005857static void init_driver_version(char *driver_version, int len)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005858{
5859 memset(driver_version, 0, len);
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06005860 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005861}
5862
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005863static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005864{
5865 char *driver_version;
5866 int i, size = sizeof(cfgtable->driver_version);
5867
5868 driver_version = kmalloc(size, GFP_KERNEL);
5869 if (!driver_version)
5870 return -ENOMEM;
5871
5872 init_driver_version(driver_version, size);
5873 for (i = 0; i < size; i++)
5874 writeb(driver_version[i], &cfgtable->driver_version[i]);
5875 kfree(driver_version);
5876 return 0;
5877}
5878
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005879static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5880 unsigned char *driver_ver)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005881{
5882 int i;
5883
5884 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5885 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5886}
5887
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005888static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005889{
5890
5891 char *driver_ver, *old_driver_ver;
5892 int rc, size = sizeof(cfgtable->driver_version);
5893
5894 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5895 if (!old_driver_ver)
5896 return -ENOMEM;
5897 driver_ver = old_driver_ver + size;
5898
5899 /* After a reset, the 32 bytes of "driver version" in the cfgtable
5900 * should have been changed, otherwise we know the reset failed.
5901 */
5902 init_driver_version(old_driver_ver, size);
5903 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5904 rc = !memcmp(driver_ver, old_driver_ver, size);
5905 kfree(old_driver_ver);
5906 return rc;
5907}
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005908/* This does a hard reset of the controller using PCI power management
 5909 * states or the doorbell register.
5910 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005911static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005912{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005913 u64 cfg_offset;
5914 u32 cfg_base_addr;
5915 u64 cfg_base_addr_index;
5916 void __iomem *vaddr;
5917 unsigned long paddr;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005918 u32 misc_fw_support;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005919 int rc;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005920 struct CfgTable __iomem *cfgtable;
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005921 u32 use_doorbell;
Stephen M. Cameron18867652010-06-16 13:51:45 -05005922 u32 board_id;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005923 u16 command_register;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005924
5925 /* For controllers as old as the P600, this is very nearly
5926 * the same thing as
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005927 *
5928 * pci_save_state(pci_dev);
5929 * pci_set_power_state(pci_dev, PCI_D3hot);
5930 * pci_set_power_state(pci_dev, PCI_D0);
5931 * pci_restore_state(pci_dev);
5932 *
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005933 * For controllers newer than the P600, the pci power state
5934 * method of resetting doesn't work so we have another way
5935 * using the doorbell register.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005936 */
Stephen M. Cameron18867652010-06-16 13:51:45 -05005937
Stephen M. Cameron25c1e56a2011-01-06 14:48:18 -06005938 rc = hpsa_lookup_board_id(pdev, &board_id);
Stephen M. Cameron46380782011-05-03 15:00:01 -05005939 if (rc < 0 || !ctlr_is_resettable(board_id)) {
Stephen M. Cameron25c1e56a2011-01-06 14:48:18 -06005940 dev_warn(&pdev->dev, "Not resetting device.\n");
5941 return -ENODEV;
5942 }
Stephen M. Cameron46380782011-05-03 15:00:01 -05005943
5944 /* if controller is soft- but not hard resettable... */
5945 if (!ctlr_is_hard_resettable(board_id))
5946 return -ENOTSUPP; /* try soft reset later. */
Stephen M. Cameron18867652010-06-16 13:51:45 -05005947
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005948 /* Save the PCI command register */
5949 pci_read_config_word(pdev, 4, &command_register);
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005950 pci_save_state(pdev);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005951
5952 /* find the first memory BAR, so we can find the cfg table */
5953 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5954 if (rc)
5955 return rc;
5956 vaddr = remap_pci_mem(paddr, 0x250);
5957 if (!vaddr)
5958 return -ENOMEM;
5959
5960 /* find cfgtable in order to check if reset via doorbell is supported */
5961 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5962 &cfg_base_addr_index, &cfg_offset);
5963 if (rc)
5964 goto unmap_vaddr;
5965 cfgtable = remap_pci_mem(pci_resource_start(pdev,
5966 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5967 if (!cfgtable) {
5968 rc = -ENOMEM;
5969 goto unmap_vaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005970 }
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005971 rc = write_driver_ver_to_cfgtable(cfgtable);
5972 if (rc)
5973 goto unmap_vaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005974
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005975 /* If reset via doorbell register is supported, use that.
5976 * There are two such methods. Favor the newest method.
5977 */
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005978 misc_fw_support = readl(&cfgtable->misc_fw_support);
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005979 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
5980 if (use_doorbell) {
5981 use_doorbell = DOORBELL_CTLR_RESET2;
5982 } else {
5983 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
5984 if (use_doorbell) {
Mike Millerfba63092011-10-13 11:44:06 -05005985 dev_warn(&pdev->dev, "Soft reset not supported. "
5986 "Firmware update is required.\n");
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005987 rc = -ENOTSUPP; /* try soft reset */
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005988 goto unmap_cfgtable;
5989 }
5990 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005991
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005992 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
5993 if (rc)
5994 goto unmap_cfgtable;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005995
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005996 pci_restore_state(pdev);
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005997 pci_write_config_word(pdev, 4, command_register);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005998
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005999 /* Some devices (notably the HP Smart Array 5i Controller)
6000 need a little pause here */
6001 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6002
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006003 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6004 if (rc) {
6005 dev_warn(&pdev->dev,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006006 "failed waiting for board to become ready "
6007 "after hard reset\n");
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006008 goto unmap_cfgtable;
6009 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006010
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006011 rc = controller_reset_failed(vaddr);
6012 if (rc < 0)
6013 goto unmap_cfgtable;
6014 if (rc) {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006015 dev_warn(&pdev->dev, "Unable to successfully reset "
6016 "controller. Will try soft reset.\n");
6017 rc = -ENOTSUPP;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006018 } else {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006019 dev_info(&pdev->dev, "board ready after hard reset.\n");
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006020 }
6021
6022unmap_cfgtable:
6023 iounmap(cfgtable);
6024
6025unmap_vaddr:
6026 iounmap(vaddr);
6027 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006028}
6029
6030/*
 6031 * We cannot read the structure directly; for portability we must use
 6032 * the I/O functions.
6033 * This is for debug only.
6034 */
Don Brace42a91642014-11-14 17:26:27 -06006035static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006036{
Stephen M. Cameron58f86652010-05-27 15:13:58 -05006037#ifdef HPSA_DEBUG
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006038 int i;
6039 char temp_name[17];
6040
6041 dev_info(dev, "Controller Configuration information\n");
6042 dev_info(dev, "------------------------------------\n");
6043 for (i = 0; i < 4; i++)
6044 temp_name[i] = readb(&(tb->Signature[i]));
6045 temp_name[4] = '\0';
6046 dev_info(dev, " Signature = %s\n", temp_name);
6047 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6048 dev_info(dev, " Transport methods supported = 0x%x\n",
6049 readl(&(tb->TransportSupport)));
6050 dev_info(dev, " Transport methods active = 0x%x\n",
6051 readl(&(tb->TransportActive)));
6052 dev_info(dev, " Requested transport Method = 0x%x\n",
6053 readl(&(tb->HostWrite.TransportRequest)));
6054 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6055 readl(&(tb->HostWrite.CoalIntDelay)));
6056 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6057 readl(&(tb->HostWrite.CoalIntCount)));
6058 dev_info(dev, " Max outstanding commands = 0x%d\n",
6059 readl(&(tb->CmdsOutMax)));
6060 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6061 for (i = 0; i < 16; i++)
6062 temp_name[i] = readb(&(tb->ServerName[i]));
6063 temp_name[16] = '\0';
6064 dev_info(dev, " Server Name = %s\n", temp_name);
6065 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6066 readl(&(tb->HeartBeat)));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006067#endif /* HPSA_DEBUG */
Stephen M. Cameron58f86652010-05-27 15:13:58 -05006068}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006069
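/*
 * Translate a BAR address-register offset (relative to
 * PCI_BASE_ADDRESS_0) into a pci_resource_*() index, accounting for
 * I/O and 32-bit memory BARs taking 4 bytes of config space and 64-bit
 * memory BARs taking 8.
 */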
6070static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6071{
6072 int i, offset, mem_type, bar_type;
6073
6074 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6075 return 0;
6076 offset = 0;
6077 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6078 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6079 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6080 offset += 4;
6081 else {
6082 mem_type = pci_resource_flags(pdev, i) &
6083 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6084 switch (mem_type) {
6085 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6086 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6087 offset += 4; /* 32 bit */
6088 break;
6089 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6090 offset += 8;
6091 break;
6092 default: /* reserved in PCI 2.2 */
6093 dev_warn(&pdev->dev,
6094 "base address is invalid\n");
6095 return -1;
6096 break;
6097 }
6098 }
6099 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6100 return i + 1;
6101 }
6102 return -1;
6103}
6104
6105/* If MSI/MSI-X is supported by the kernel we will try to enable it on
6106 * controllers that are capable. If not, we use IO-APIC mode.
6107 */
6108
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006109static void hpsa_interrupt_mode(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006110{
6111#ifdef CONFIG_PCI_MSI
Matt Gates254f7962012-05-01 11:43:06 -05006112 int err, i;
6113 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6114
6115 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6116 hpsa_msix_entries[i].vector = 0;
6117 hpsa_msix_entries[i].entry = i;
6118 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006119
6120 /* Some boards advertise MSI but don't really support it */
Stephen M. Cameron6b3f4c52010-05-27 15:13:02 -05006121 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6122 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006123 goto default_int_mode;
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006124 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6125 dev_info(&h->pdev->dev, "MSIX\n");
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006126 h->msix_vector = MAX_REPLY_QUEUES;
Stephen M. Cameronf89439b2014-05-29 10:53:02 -05006127 if (h->msix_vector > num_online_cpus())
6128 h->msix_vector = num_online_cpus();
Alexander Gordeev18fce3c2014-08-18 08:01:42 +02006129 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6130 1, h->msix_vector);
6131 if (err < 0) {
6132 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6133 h->msix_vector = 0;
6134 goto single_msi_mode;
6135 } else if (err < h->msix_vector) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006136 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006137 "available\n", err);
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006138 }
Alexander Gordeev18fce3c2014-08-18 08:01:42 +02006139 h->msix_vector = err;
6140 for (i = 0; i < h->msix_vector; i++)
6141 h->intr[i] = hpsa_msix_entries[i].vector;
6142 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006143 }
Alexander Gordeev18fce3c2014-08-18 08:01:42 +02006144single_msi_mode:
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006145 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6146 dev_info(&h->pdev->dev, "MSI\n");
6147 if (!pci_enable_msi(h->pdev))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006148 h->msi_vector = 1;
6149 else
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006150 dev_warn(&h->pdev->dev, "MSI init failed\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006151 }
6152default_int_mode:
6153#endif /* CONFIG_PCI_MSI */
6154 /* if we get here we're going to use the default interrupt mode */
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006155 h->intr[h->intr_mode] = h->pdev->irq;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006156}
6157
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006158static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006159{
6160 int i;
6161 u32 subsystem_vendor_id, subsystem_device_id;
6162
6163 subsystem_vendor_id = pdev->subsystem_vendor;
6164 subsystem_device_id = pdev->subsystem_device;
6165 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6166 subsystem_vendor_id;
6167
6168 for (i = 0; i < ARRAY_SIZE(products); i++)
6169 if (*board_id == products[i].board_id)
6170 return i;
6171
Stephen M. Cameron6798cc02010-06-16 13:51:20 -05006172 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6173 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6174 !hpsa_allow_any) {
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006175 dev_warn(&pdev->dev, "unrecognized board ID: "
6176 "0x%08x, ignoring.\n", *board_id);
6177 return -ENODEV;
6178 }
6179 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6180}
6181
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006182static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6183 unsigned long *memory_bar)
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006184{
6185 int i;
6186
6187 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006188 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006189 /* addressing mode bits already removed */
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006190 *memory_bar = pci_resource_start(pdev, i);
6191 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006192 *memory_bar);
6193 return 0;
6194 }
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006195 dev_warn(&pdev->dev, "no memory BAR found\n");
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006196 return -ENODEV;
6197}
6198
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006199static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6200 int wait_for_ready)
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006201{
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006202 int i, iterations;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006203 u32 scratchpad;
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006204 if (wait_for_ready)
6205 iterations = HPSA_BOARD_READY_ITERATIONS;
6206 else
6207 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006208
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006209 for (i = 0; i < iterations; i++) {
6210 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6211 if (wait_for_ready) {
6212 if (scratchpad == HPSA_FIRMWARE_READY)
6213 return 0;
6214 } else {
6215 if (scratchpad != HPSA_FIRMWARE_READY)
6216 return 0;
6217 }
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006218 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6219 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006220 dev_warn(&pdev->dev, "board not ready, timed out.\n");
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006221 return -ENODEV;
6222}
6223
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006224static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6225 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6226 u64 *cfg_offset)
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006227{
6228 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6229 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6230 *cfg_base_addr &= (u32) 0x0000ffff;
6231 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6232 if (*cfg_base_addr_index == -1) {
6233 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6234 return -ENODEV;
6235 }
6236 return 0;
6237}
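/*
 * The controller publishes where its config table lives through two
 * scratch registers: SA5_CTCFG_OFFSET names the BAR and SA5_CTMEM_OFFSET
 * the offset within it, so the table sits at
 * pci_resource_start(pdev, cfg_base_addr_index) + cfg_offset.
 */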
6238
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006239static int hpsa_find_cfgtables(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006240{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06006241 u64 cfg_offset;
6242 u32 cfg_base_addr;
6243 u64 cfg_base_addr_index;
Don Brace303932f2010-02-04 08:42:40 -06006244 u32 trans_offset;
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006245 int rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006246
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006247 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6248 &cfg_base_addr_index, &cfg_offset);
6249 if (rc)
6250 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006251 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006252 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006253 if (!h->cfgtable)
6254 return -ENOMEM;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006255 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6256 if (rc)
6257 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006258 /* Find performant mode table. */
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006259 trans_offset = readl(&h->cfgtable->TransMethodOffset);
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006260 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6261 cfg_base_addr_index)+cfg_offset+trans_offset,
6262 sizeof(*h->transtable));
6263 if (!h->transtable)
6264 return -ENOMEM;
6265 return 0;
6266}
6267
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006268static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006269{
6270 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
Stephen M. Cameron72ceeae2011-01-06 14:48:13 -06006271
6272 /* Limit commands in memory limited kdump scenario. */
6273 if (reset_devices && h->max_commands > 32)
6274 h->max_commands = 32;
6275
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006276 if (h->max_commands < 16) {
6277 dev_warn(&h->pdev->dev, "Controller reports "
6278 "max supported commands of %d, an obvious lie. "
6279 "Using 16. Ensure that firmware is up to date.\n",
6280 h->max_commands);
6281 h->max_commands = 16;
6282 }
6283}
6284
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006285/* Interrogate the hardware for some limits:
6286 * max commands, max SG elements without chaining, and with chaining,
6287 * SG chain block size, etc.
6288 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006289static void hpsa_find_board_params(struct ctlr_info *h)
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006290{
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006291 hpsa_get_max_perf_mode_cmds(h);
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006292 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
6293 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006294 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006295 /*
 6296 * Limit in-command s/g elements to 32 to save DMA'able memory.
 6297 * However, the spec says if 0, use 31
6298 */
6299 h->max_cmd_sg_entries = 31;
6300 if (h->maxsgentries > 512) {
6301 h->max_cmd_sg_entries = 32;
Webb Scales1a63ea62014-11-14 17:26:43 -06006302 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006303 h->maxsgentries--; /* save one for chain pointer */
6304 } else {
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006305 h->chainsize = 0;
Webb Scales1a63ea62014-11-14 17:26:43 -06006306 h->maxsgentries = 31; /* default to traditional values */
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006307 }
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006308
 6309 /* Find out what task management functions are supported and cache them */
6310 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
Scott Teel0e7a7fc2014-02-18 13:55:59 -06006311 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6312 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6313 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6314 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006315}
6316
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05006317static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6318{
Akinobu Mita0fc9fd42012-04-04 22:14:59 +09006319 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05006320 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
6321 return false;
6322 }
6323 return true;
6324}
6325
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006326static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006327{
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006328 u32 driver_support;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006329
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006330 driver_support = readl(&(h->cfgtable->driver_support));
Arnd Bergmann0b9e7b72014-06-26 15:44:52 +02006331 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6332#ifdef CONFIG_X86
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006333 driver_support |= ENABLE_SCSI_PREFETCH;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006334#endif
Stephen M. Cameron28e13442013-12-04 17:10:21 -06006335 driver_support |= ENABLE_UNIT_ATTN;
6336 writel(driver_support, &(h->cfgtable->driver_support));
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006337}
6338
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05006339/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
6340 * in a prefetch beyond physical memory.
6341 */
6342static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6343{
6344 u32 dma_prefetch;
6345
6346 if (h->board_id != 0x3225103C)
6347 return;
6348 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6349 dma_prefetch |= 0x8000;
6350 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6351}
6352
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006353static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6354{
6355 int i;
6356 u32 doorbell_value;
6357 unsigned long flags;
6358 /* wait until the clear_event_notify bit 6 is cleared by controller. */
6359 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6360 spin_lock_irqsave(&h->lock, flags);
6361 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6362 spin_unlock_irqrestore(&h->lock, flags);
6363 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6364 break;
6365 /* delay and try again */
6366 msleep(20);
6367 }
6368}
6369
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006370static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006371{
6372 int i;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006373 u32 doorbell_value;
6374 unsigned long flags;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006375
 6376 /* under certain very rare conditions, this can take a while.
6377 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
6378 * as we enter this code.)
6379 */
6380 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006381 spin_lock_irqsave(&h->lock, flags);
6382 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6383 spin_unlock_irqrestore(&h->lock, flags);
Dan Carpenter382be662011-02-15 15:33:13 -06006384 if (!(doorbell_value & CFGTBL_ChangeReq))
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006385 break;
6386 /* delay and try again */
Stephen M. Cameron60d3f5b2011-01-06 14:48:34 -06006387 usleep_range(10000, 20000);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006388 }
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006389}
6390
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006391static int hpsa_enter_simple_mode(struct ctlr_info *h)
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006392{
6393 u32 trans_support;
6394
6395 trans_support = readl(&(h->cfgtable->TransportSupport));
6396 if (!(trans_support & SIMPLE_MODE))
6397 return -ENOTSUPP;
6398
6399 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006400
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006401 /* Update the field, and then ring the doorbell */
6402 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06006403 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006404 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6405 hpsa_wait_for_mode_change_ack(h);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006406 print_cfg_table(&h->pdev->dev, h->cfgtable);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006407 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6408 goto error;
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06006409 h->transMethod = CFGTBL_Trans_Simple;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006410 return 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006411error:
6412 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
6413 return -ENODEV;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006414}
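/*
 * Transport mode changes are a doorbell handshake: write the request
 * into the config table, ring CFGTBL_ChangeReq in SA5_DOORBELL, wait
 * for the controller to acknowledge (hpsa_wait_for_mode_change_ack()),
 * then verify that TransportActive really switched.
 */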
6415
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006416static int hpsa_pci_init(struct ctlr_info *h)
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006417{
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006418 int prod_index, err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006419
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006420 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6421 if (prod_index < 0)
6422 return -ENODEV;
6423 h->product_name = products[prod_index].product_name;
6424 h->access = *(products[prod_index].access);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006425
Matthew Garrette5a44df2011-11-11 11:14:23 -05006426 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6427 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6428
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006429 err = pci_enable_device(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006430 if (err) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006431 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006432 return err;
6433 }
6434
Stephen M. Cameron5cb460a2012-05-01 11:42:20 -05006435 /* Enable bus mastering (pci_disable_device may disable this) */
6436 pci_set_master(h->pdev);
6437
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06006438 err = pci_request_regions(h->pdev, HPSA);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006439 if (err) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006440 dev_err(&h->pdev->dev,
6441 "cannot obtain PCI resources, aborting\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006442 return err;
6443 }
Stephen M. Cameron6b3f4c52010-05-27 15:13:02 -05006444 hpsa_interrupt_mode(h);
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006445 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006446 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006447 goto err_out_free_res;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006448 h->vaddr = remap_pci_mem(h->paddr, 0x250);
Stephen M. Cameron204892e2010-05-27 15:13:22 -05006449 if (!h->vaddr) {
6450 err = -ENOMEM;
6451 goto err_out_free_res;
6452 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006453 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006454 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006455 goto err_out_free_res;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006456 err = hpsa_find_cfgtables(h);
6457 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006458 goto err_out_free_res;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006459 hpsa_find_board_params(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006460
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05006461 if (!hpsa_CISS_signature_present(h)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006462 err = -ENODEV;
6463 goto err_out_free_res;
6464 }
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006465 hpsa_set_driver_support_bits(h);
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05006466 hpsa_p600_dma_prefetch_quirk(h);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006467 err = hpsa_enter_simple_mode(h);
6468 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006469 goto err_out_free_res;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006470 return 0;
6471
6472err_out_free_res:
Stephen M. Cameron204892e2010-05-27 15:13:22 -05006473 if (h->transtable)
6474 iounmap(h->transtable);
6475 if (h->cfgtable)
6476 iounmap(h->cfgtable);
6477 if (h->vaddr)
6478 iounmap(h->vaddr);
Stephen M. Cameronf0bd0b682012-05-01 11:42:09 -05006479 pci_disable_device(h->pdev);
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006480 pci_release_regions(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006481 return err;
6482}
6483
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006484static void hpsa_hba_inquiry(struct ctlr_info *h)
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06006485{
6486 int rc;
6487
6488#define HBA_INQUIRY_BYTE_COUNT 64
6489 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6490 if (!h->hba_inquiry_data)
6491 return;
6492 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6493 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6494 if (rc != 0) {
6495 kfree(h->hba_inquiry_data);
6496 h->hba_inquiry_data = NULL;
6497 }
6498}
6499
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006500static int hpsa_init_reset_devices(struct pci_dev *pdev)
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006501{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006502 int rc, i;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006503
6504 if (!reset_devices)
6505 return 0;
6506
Tomas Henzl132aa222014-08-14 16:12:39 +02006507 /* The kdump kernel is loading and we don't know what state the
 6508 * PCI interface is in; dev->enable_cnt is zero, so we call
 6509 * enable+disable, wait a while, and switch it on.
6510 */
6511 rc = pci_enable_device(pdev);
6512 if (rc) {
6513 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6514 return -ENODEV;
6515 }
6516 pci_disable_device(pdev);
6517 msleep(260); /* a randomly chosen number */
6518 rc = pci_enable_device(pdev);
6519 if (rc) {
6520 dev_warn(&pdev->dev, "failed to enable device.\n");
6521 return -ENODEV;
6522 }
Tomas Henzl859c75a2014-09-12 14:44:15 +02006523 pci_set_master(pdev);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006524 /* Reset the controller with a PCI power-cycle or via doorbell */
6525 rc = hpsa_kdump_hard_reset_controller(pdev);
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006526
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006527 /* -ENOTSUPP here means we cannot reset the controller
6528 * but it's already (and still) up and running in
Stephen M. Cameron18867652010-06-16 13:51:45 -05006529 * "performant mode". Or, it might be 640x, which can't reset
6530 * due to concerns about shared bbwc between 6402/6404 pair.
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006531 */
Tomas Henzl132aa222014-08-14 16:12:39 +02006532 if (rc) {
6533 if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
6534 rc = -ENODEV;
6535 goto out_disable;
6536 }
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006537
6538 /* Now try to get the controller to respond to a no-op */
Stephen M. Cameron2b870cb2011-05-03 14:59:36 -05006539 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006540 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6541 if (hpsa_noop(pdev) == 0)
6542 break;
6543 else
6544 dev_warn(&pdev->dev, "no-op failed%s\n",
6545 (i < 11 ? "; re-trying" : ""));
6546 }
Tomas Henzl132aa222014-08-14 16:12:39 +02006547
6548out_disable:
6549
6550 pci_disable_device(pdev);
6551 return rc;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006552}
6553
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006554static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006555{
6556 h->cmd_pool_bits = kzalloc(
6557 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6558 sizeof(unsigned long), GFP_KERNEL);
6559 h->cmd_pool = pci_alloc_consistent(h->pdev,
6560 h->nr_cmds * sizeof(*h->cmd_pool),
6561 &(h->cmd_pool_dhandle));
6562 h->errinfo_pool = pci_alloc_consistent(h->pdev,
6563 h->nr_cmds * sizeof(*h->errinfo_pool),
6564 &(h->errinfo_pool_dhandle));
6565 if ((h->cmd_pool_bits == NULL)
6566 || (h->cmd_pool == NULL)
6567 || (h->errinfo_pool == NULL)) {
6568 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
6569 return -ENOMEM;
6570 }
6571 return 0;
6572}
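/*
 * cmd_pool_bits is a bitmap with one bit per command slot; the command
 * allocator elsewhere in this file hands out a slot together with the
 * matching entry in the parallel DMA-coherent error-info pool.
 */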
6573
6574static void hpsa_free_cmd_pool(struct ctlr_info *h)
6575{
6576 kfree(h->cmd_pool_bits);
6577 if (h->cmd_pool)
6578 pci_free_consistent(h->pdev,
6579 h->nr_cmds * sizeof(struct CommandList),
6580 h->cmd_pool, h->cmd_pool_dhandle);
Stephen M. Cameronaca90122014-02-18 13:56:14 -06006581 if (h->ioaccel2_cmd_pool)
6582 pci_free_consistent(h->pdev,
6583 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6584 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006585 if (h->errinfo_pool)
6586 pci_free_consistent(h->pdev,
6587 h->nr_cmds * sizeof(struct ErrorInfo),
6588 h->errinfo_pool,
6589 h->errinfo_pool_dhandle);
Matt Gatese1f7de02014-02-18 13:55:17 -06006590 if (h->ioaccel_cmd_pool)
6591 pci_free_consistent(h->pdev,
6592 h->nr_cmds * sizeof(struct io_accel1_cmd),
6593 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006594}
6595
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05006596static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6597{
6598 int i, cpu, rc;
6599
6600 cpu = cpumask_first(cpu_online_mask);
6601 for (i = 0; i < h->msix_vector; i++) {
6602 rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6603 cpu = cpumask_next(cpu, cpu_online_mask);
6604 }
6605}
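/*
 * Spread the per-reply-queue MSI-X vectors round-robin across the
 * online CPUs via affinity hints, so that irqbalance (or a manual
 * smp_affinity setting) can keep each queue's completions local to
 * one CPU.
 */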
6606
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006607static int hpsa_request_irq(struct ctlr_info *h,
6608 irqreturn_t (*msixhandler)(int, void *),
6609 irqreturn_t (*intxhandler)(int, void *))
6610{
Matt Gates254f7962012-05-01 11:43:06 -05006611 int rc, i;
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006612
Matt Gates254f7962012-05-01 11:43:06 -05006613 /*
6614 * initialize h->q[x] = x so that interrupt handlers know which
6615 * queue to process.
6616 */
6617 for (i = 0; i < MAX_REPLY_QUEUES; i++)
6618 h->q[i] = (u8) i;
6619
	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vector; i++) {
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
			if (rc) {
				int j;

				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
					h->intr[i], h->devname);
				/* free any vectors already requested */
				for (j = 0; j < i; j++)
					free_irq(h->intr[j], &h->q[j]);
				return -ENODEV;
			}
		}
		hpsa_irq_affinity_hints(h);
Matt Gates254f7962012-05-01 11:43:06 -05006627 } else {
6628 /* Use single reply pool */
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006629 if (h->msix_vector > 0 || h->msi_vector) {
Matt Gates254f7962012-05-01 11:43:06 -05006630 rc = request_irq(h->intr[h->intr_mode],
6631 msixhandler, 0, h->devname,
6632 &h->q[h->intr_mode]);
6633 } else {
6634 rc = request_irq(h->intr[h->intr_mode],
6635 intxhandler, IRQF_SHARED, h->devname,
6636 &h->q[h->intr_mode]);
6637 }
6638 }
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006639 if (rc) {
6640 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6641 h->intr[h->intr_mode], h->devname);
6642 return -ENODEV;
6643 }
6644 return 0;
6645}
6646
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006647static int hpsa_kdump_soft_reset(struct ctlr_info *h)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006648{
6649 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6650 HPSA_RESET_TYPE_CONTROLLER)) {
6651 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6652 return -EIO;
6653 }
6654
6655 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6656 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6657 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6658 return -1;
6659 }
6660
6661 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6662 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
6665 return -1;
6666 }
6667
6668 return 0;
6669}
6670
Matt Gates254f7962012-05-01 11:43:06 -05006671static void free_irqs(struct ctlr_info *h)
6672{
6673 int i;
6674
6675 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6676 /* Single reply queue, only one irq to free */
6677 i = h->intr_mode;
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05006678 irq_set_affinity_hint(h->intr[i], NULL);
Matt Gates254f7962012-05-01 11:43:06 -05006679 free_irq(h->intr[i], &h->q[i]);
6680 return;
6681 }
6682
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05006683 for (i = 0; i < h->msix_vector; i++) {
6684 irq_set_affinity_hint(h->intr[i], NULL);
Matt Gates254f7962012-05-01 11:43:06 -05006685 free_irq(h->intr[i], &h->q[i]);
Stephen M. Cameron41b3cf02014-05-29 10:53:13 -05006686 }
Matt Gates254f7962012-05-01 11:43:06 -05006687}
6688
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006689static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006690{
Matt Gates254f7962012-05-01 11:43:06 -05006691 free_irqs(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006692#ifdef CONFIG_PCI_MSI
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006693 if (h->msix_vector) {
6694 if (h->pdev->msix_enabled)
6695 pci_disable_msix(h->pdev);
6696 } else if (h->msi_vector) {
6697 if (h->pdev->msi_enabled)
6698 pci_disable_msi(h->pdev);
6699 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006700#endif /* CONFIG_PCI_MSI */
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006701}
6702
Stephen M. Cameron072b0512014-05-29 10:53:07 -05006703static void hpsa_free_reply_queues(struct ctlr_info *h)
6704{
6705 int i;
6706
6707 for (i = 0; i < h->nreply_queues; i++) {
6708 if (!h->reply_queue[i].head)
6709 continue;
6710 pci_free_consistent(h->pdev, h->reply_queue_size,
6711 h->reply_queue[i].head, h->reply_queue[i].busaddr);
6712 h->reply_queue[i].head = NULL;
6713 h->reply_queue[i].busaddr = 0;
6714 }
6715}
6716
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006717static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6718{
6719 hpsa_free_irqs_and_disable_msix(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006720 hpsa_free_sg_chain_blocks(h);
6721 hpsa_free_cmd_pool(h);
Matt Gatese1f7de02014-02-18 13:55:17 -06006722 kfree(h->ioaccel1_blockFetchTable);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006723 kfree(h->blockFetchTable);
Stephen M. Cameron072b0512014-05-29 10:53:07 -05006724 hpsa_free_reply_queues(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006725 if (h->vaddr)
6726 iounmap(h->vaddr);
6727 if (h->transtable)
6728 iounmap(h->transtable);
6729 if (h->cfgtable)
6730 iounmap(h->cfgtable);
Tomas Henzl132aa222014-08-14 16:12:39 +02006731 pci_disable_device(h->pdev);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006732 pci_release_regions(h->pdev);
6733 kfree(h);
6734}
6735
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006736/* Called when controller lockup detected. */
6737static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
6738{
6739 struct CommandList *c = NULL;
6740
6741 assert_spin_locked(&h->lock);
6742 /* Mark all outstanding commands as failed and complete them. */
6743 while (!list_empty(list)) {
6744 c = list_entry(list->next, struct CommandList, list);
6745 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
Stephen M. Cameron5a3d16f2012-05-01 11:42:46 -05006746 finish_cmd(c);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006747 }
6748}
6749
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006750static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6751{
6752 int i, cpu;
6753
6754 cpu = cpumask_first(cpu_online_mask);
6755 for (i = 0; i < num_online_cpus(); i++) {
6756 u32 *lockup_detected;
6757 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6758 *lockup_detected = value;
6759 cpu = cpumask_next(cpu, cpu_online_mask);
6760 }
6761 wmb(); /* be sure the per-cpu variables are out to memory */
6762}
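/*
 * A minimal sketch of the reader side (the actual helper lives elsewhere
 * in this file); each CPU consults only its own copy, so no lock is
 * needed:
 *
 *	cpu = get_cpu();
 *	rc = *per_cpu_ptr(h->lockup_detected, cpu);
 *	put_cpu();
 */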
6763
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006764static void controller_lockup_detected(struct ctlr_info *h)
6765{
6766 unsigned long flags;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006767 u32 lockup_detected;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006768
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006769 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6770 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006771 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6772 if (!lockup_detected) {
		/* No heartbeat, but the scratchpad reads zero; substitute
		 * a nonzero sentinel so the per-cpu flags still read as
		 * "locked up".
		 */
6774 dev_warn(&h->pdev->dev,
6775 "lockup detected but scratchpad register is zero\n");
6776 lockup_detected = 0xffffffff;
6777 }
6778 set_lockup_detected_for_all_cpus(h, lockup_detected);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006779 spin_unlock_irqrestore(&h->lock, flags);
6780 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006781 lockup_detected);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006782 pci_disable_device(h->pdev);
6783 spin_lock_irqsave(&h->lock, flags);
6784 fail_all_cmds_on_list(h, &h->cmpQ);
6785 fail_all_cmds_on_list(h, &h->reqQ);
6786 spin_unlock_irqrestore(&h->lock, flags);
6787}
6788
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006789static void detect_controller_lockup(struct ctlr_info *h)
6790{
6791 u64 now;
6792 u32 heartbeat;
6793 unsigned long flags;
6794
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006795 now = get_jiffies_64();
6796 /* If we've received an interrupt recently, we're ok. */
6797 if (time_after64(h->last_intr_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05006798 (h->heartbeat_sample_interval), now))
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006799 return;
6800
6801 /*
6802 * If we've already checked the heartbeat recently, we're ok.
6803 * This could happen if someone sends us a signal. We
6804 * otherwise don't care about signals in this thread.
6805 */
6806 if (time_after64(h->last_heartbeat_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05006807 (h->heartbeat_sample_interval), now))
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006808 return;
6809
6810 /* If heartbeat has not changed since we last looked, we're not ok. */
6811 spin_lock_irqsave(&h->lock, flags);
6812 heartbeat = readl(&h->cfgtable->HeartBeat);
6813 spin_unlock_irqrestore(&h->lock, flags);
6814 if (h->last_heartbeat == heartbeat) {
6815 controller_lockup_detected(h);
6816 return;
6817 }
6818
6819 /* We're ok. */
6820 h->last_heartbeat = heartbeat;
6821 h->last_heartbeat_timestamp = now;
6822}
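/*
 * Worked example (illustrative numbers, not from the spec): with a
 * 30-second heartbeat_sample_interval, the worker reads HeartBeat at
 * t=0 (say, 100) and again at t=30s; if it still reads 100, the
 * controller made no progress for a full interval and is declared
 * locked up.
 */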
6823
Stephen M. Cameron98465902014-02-21 16:25:00 -06006824static void hpsa_ack_ctlr_events(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006825{
6826 int i;
6827 char *event_type;
6828
Scott Teele863d682014-02-18 13:57:05 -06006829 /* Clear the driver-requested rescan flag */
6830 h->drv_req_rescan = 0;
6831
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006832 /* Ask the controller to clear the events we're handling. */
Stephen M. Cameron1f7cee82014-02-18 13:56:09 -06006833 if ((h->transMethod & (CFGTBL_Trans_io_accel1
6834 | CFGTBL_Trans_io_accel2)) &&
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006835 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6836 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6837
6838 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6839 event_type = "state change";
6840 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6841 event_type = "configuration change";
6842 /* Stop sending new RAID offload reqs via the IO accelerator */
6843 scsi_block_requests(h->scsi_host);
6844 for (i = 0; i < h->ndevices; i++)
6845 h->dev[i]->offload_enabled = 0;
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06006846 hpsa_drain_accel_commands(h);
		/* Log which event we are acknowledging */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
6851 writel(h->events, &(h->cfgtable->clear_event_notify));
6852 /* Set the "clear event notify field update" bit 6 */
6853 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6854 /* Wait until ctlr clears 'clear event notify field', bit 6 */
6855 hpsa_wait_for_clear_event_notify_ack(h);
6856 scsi_unblock_requests(h->scsi_host);
6857 } else {
6858 /* Acknowledge controller notification events. */
6859 writel(h->events, &(h->cfgtable->clear_event_notify));
6860 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6861 hpsa_wait_for_clear_event_notify_ack(h);
6862#if 0
6863 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6864 hpsa_wait_for_mode_change_ack(h);
6865#endif
6866 }
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006868}
6869
6870/* Check a register on the controller to see if there are configuration
6871 * changes (added/changed/removed logical drives, etc.) which mean that
Scott Teele863d682014-02-18 13:57:05 -06006872 * we should rescan the controller for devices.
6873 * Also check flag for driver-initiated rescan.
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006874 */
Stephen M. Cameron98465902014-02-21 16:25:00 -06006875static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006876{
Stephen M. Cameron98465902014-02-21 16:25:00 -06006877 if (h->drv_req_rescan)
6878 return 1;
6879
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006880 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
Stephen M. Cameron98465902014-02-21 16:25:00 -06006881 return 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006882
6883 h->events = readl(&(h->cfgtable->event_notify));
Stephen M. Cameron98465902014-02-21 16:25:00 -06006884 return h->events & RESCAN_REQUIRED_EVENT_BITS;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006885}
6886
Stephen M. Cameron98465902014-02-21 16:25:00 -06006887/*
6888 * Check if any of the offline devices have become ready
6889 */
6890static int hpsa_offline_devices_ready(struct ctlr_info *h)
6891{
6892 unsigned long flags;
6893 struct offline_device_entry *d;
6894 struct list_head *this, *tmp;
6895
6896 spin_lock_irqsave(&h->offline_device_lock, flags);
6897 list_for_each_safe(this, tmp, &h->offline_device_list) {
6898 d = list_entry(this, struct offline_device_entry,
6899 offline_list);
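		/*
		 * Drop the lock across hpsa_volume_offline(): it issues
		 * commands to the device and may sleep, so it must not be
		 * called with the spinlock held.
		 */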
6900 spin_unlock_irqrestore(&h->offline_device_lock, flags);
Stephen M. Camerond1fea472014-07-03 10:17:58 -05006901 if (!hpsa_volume_offline(h, d->scsi3addr)) {
6902 spin_lock_irqsave(&h->offline_device_lock, flags);
6903 list_del(&d->offline_list);
6904 spin_unlock_irqrestore(&h->offline_device_lock, flags);
Stephen M. Cameron98465902014-02-21 16:25:00 -06006905 return 1;
Stephen M. Camerond1fea472014-07-03 10:17:58 -05006906 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06006907 spin_lock_irqsave(&h->offline_device_lock, flags);
6908 }
6909 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6910 return 0;
6911}
6912
6913
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006914static void hpsa_monitor_ctlr_worker(struct work_struct *work)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006915{
6916 unsigned long flags;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006917 struct ctlr_info *h = container_of(to_delayed_work(work),
6918 struct ctlr_info, monitor_ctlr_work);
6919 detect_controller_lockup(h);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006920 if (lockup_detected(h))
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006921 return;
Stephen M. Cameron98465902014-02-21 16:25:00 -06006922
6923 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6924 scsi_host_get(h->scsi_host);
6925 h->drv_req_rescan = 0;
6926 hpsa_ack_ctlr_events(h);
6927 hpsa_scan_start(h->scsi_host);
6928 scsi_host_put(h->scsi_host);
6929 }
6930
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006931 spin_lock_irqsave(&h->lock, flags);
6932 if (h->remove_in_progress) {
6933 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006934 return;
6935 }
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006936 schedule_delayed_work(&h->monitor_ctlr_work,
6937 h->heartbeat_sample_interval);
6938 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006939}
6940
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006941static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006942{
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006943 int dac, rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006944 struct ctlr_info *h;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006945 int try_soft_reset = 0;
6946 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006947
6948 if (number_of_controllers == 0)
6949 printk(KERN_INFO DRIVER_NAME "\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006950
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006951 rc = hpsa_init_reset_devices(pdev);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006952 if (rc) {
6953 if (rc != -ENOTSUPP)
6954 return rc;
6955 /* If the reset fails in a particular way (it has no way to do
6956 * a proper hard reset, so returns -ENOTSUPP) we can try to do
6957 * a soft reset once we get the controller configured up to the
6958 * point that it can accept a command.
6959 */
6960 try_soft_reset = 1;
6961 rc = 0;
6962 }
6963
6964reinit_after_soft_reset:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006965
	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
Don Brace303932f2010-02-04 08:42:40 -06006970 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006971 h = kzalloc(sizeof(*h), GFP_KERNEL);
6972 if (!h)
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06006973 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006974
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006975 h->pdev = pdev;
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006976 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06006977 INIT_LIST_HEAD(&h->cmpQ);
6978 INIT_LIST_HEAD(&h->reqQ);
Stephen M. Cameron98465902014-02-21 16:25:00 -06006979 INIT_LIST_HEAD(&h->offline_device_list);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006980 spin_lock_init(&h->lock);
Stephen M. Cameron98465902014-02-21 16:25:00 -06006981 spin_lock_init(&h->offline_device_lock);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006982 spin_lock_init(&h->scan_lock);
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006983 spin_lock_init(&h->passthru_count_lock);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006984
6985 /* Allocate and clear per-cpu variable lockup_detected */
6986 h->lockup_detected = alloc_percpu(u32);
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05006987 if (!h->lockup_detected) {
6988 rc = -ENOMEM;
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006989 goto clean1;
Stephen M. Cameron2a5ac322014-07-03 10:18:08 -05006990 }
Stephen M. Cameron094963d2014-05-29 10:53:18 -05006991 set_lockup_detected_for_all_cpus(h, 0);
6992
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006993 rc = hpsa_pci_init(h);
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06006994 if (rc != 0)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006995 goto clean1;
6996
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06006997 sprintf(h->devname, HPSA "%d", number_of_controllers);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006998 h->ctlr = number_of_controllers;
6999 number_of_controllers++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007000
7001 /* configure PCI DMA stuff */
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007002 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7003 if (rc == 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007004 dac = 1;
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007005 } else {
7006 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7007 if (rc == 0) {
7008 dac = 0;
7009 } else {
7010 dev_err(&pdev->dev, "no suitable DMA available\n");
7011 goto clean1;
7012 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007013 }
7014
7015 /* make sure the board interrupts are off */
7016 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05007017
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05007018 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007019 goto clean2;
Don Brace303932f2010-02-04 08:42:40 -06007020 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
7021 h->devname, pdev->device,
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06007022 h->intr[h->intr_mode], dac ? "" : " not");
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007023 if (hpsa_allocate_cmd_pool(h))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007024 goto clean4;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06007025 if (hpsa_allocate_sg_chain_blocks(h))
7026 goto clean4;
Stephen M. Camerona08a8472010-02-04 08:43:16 -06007027 init_waitqueue_head(&h->scan_wait_queue);
7028 h->scan_finished = 1; /* no scan currently in progress */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007029
7030 pci_set_drvdata(pdev, h);
Stephen M. Cameron9a413382011-05-03 14:59:41 -05007031 h->ndevices = 0;
Stephen M. Cameron316b2212014-02-21 16:25:15 -06007032 h->hba_mode_enabled = 0;
Stephen M. Cameron9a413382011-05-03 14:59:41 -05007033 h->scsi_host = NULL;
7034 spin_lock_init(&h->devlock);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007035 hpsa_put_ctlr_into_performant_mode(h);
7036
7037 /* At this point, the controller is ready to take commands.
7038 * Now, if reset_devices and the hard reset didn't work, try
7039 * the soft reset and see if that works.
7040 */
7041 if (try_soft_reset) {
7042
		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, the value from
		 * the fifo may or may not be valid.  So we wait 10 seconds
		 * after the reset, throwing away any completions we get
		 * during that time.  Unregister the interrupt handler and
		 * register fake ones to scoop up any residual completions.
		 */
7050 spin_lock_irqsave(&h->lock, flags);
7051 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7052 spin_unlock_irqrestore(&h->lock, flags);
Matt Gates254f7962012-05-01 11:43:06 -05007053 free_irqs(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05007054 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
7055 hpsa_intx_discard_completions);
7056 if (rc) {
7057 dev_warn(&h->pdev->dev, "Failed to request_irq after "
7058 "soft reset.\n");
7059 goto clean4;
7060 }
7061
7062 rc = hpsa_kdump_soft_reset(h);
7063 if (rc)
7064 /* Neither hard nor soft reset worked, we're hosed. */
7065 goto clean4;
7066
7067 dev_info(&h->pdev->dev, "Board READY.\n");
7068 dev_info(&h->pdev->dev,
7069 "Waiting for stale completions to drain.\n");
7070 h->access.set_intr_mask(h, HPSA_INTR_ON);
7071 msleep(10000);
7072 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7073
7074 rc = controller_reset_failed(h->cfgtable);
7075 if (rc)
7076 dev_info(&h->pdev->dev,
7077 "Soft reset appears to have failed.\n");
7078
7079 /* since the controller's reset, we have to go back and re-init
7080 * everything. Easiest to just forget what we've done and do it
7081 * all over again.
7082 */
7083 hpsa_undo_allocations_after_kdump_soft_reset(h);
7084 try_soft_reset = 0;
7085 if (rc)
7086 /* don't go to clean4, we already unallocated */
7087 return -ENODEV;
7088
7089 goto reinit_after_soft_reset;
7090 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007091
Stephen M. Cameron316b2212014-02-21 16:25:15 -06007092 /* Enable Accelerated IO path at driver layer */
7093 h->acciopath_status = 1;
Scott Teelda0697b2014-02-18 13:57:00 -06007094
Scott Teele863d682014-02-18 13:57:05 -06007095 h->drv_req_rescan = 0;
7096
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007097 /* Turn the interrupts on so we can service requests */
7098 h->access.set_intr_mask(h, HPSA_INTR_ON);
7099
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06007100 hpsa_hba_inquiry(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007101 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007102
7103 /* Monitor the controller for firmware lockups */
7104 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7105 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7106 schedule_delayed_work(&h->monitor_ctlr_work,
7107 h->heartbeat_sample_interval);
Stephen M. Cameron88bf6d62013-11-01 11:02:25 -05007108 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007109
7110clean4:
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06007111 hpsa_free_sg_chain_blocks(h);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05007112 hpsa_free_cmd_pool(h);
Matt Gates254f7962012-05-01 11:43:06 -05007113 free_irqs(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007114clean2:
7115clean1:
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007116 if (h->lockup_detected)
7117 free_percpu(h->lockup_detected);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007118 kfree(h);
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06007119 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007120}
7121
7122static void hpsa_flush_cache(struct ctlr_info *h)
7123{
7124 char *flush_buf;
7125 struct CommandList *c;
Stephen M. Cameron702890e2013-09-23 13:33:30 -05007126
7127 /* Don't bother trying to flush the cache if locked up */
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007128 if (unlikely(lockup_detected(h)))
Stephen M. Cameron702890e2013-09-23 13:33:30 -05007129 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007130 flush_buf = kzalloc(4, GFP_KERNEL);
7131 if (!flush_buf)
7132 return;
7133
7134 c = cmd_special_alloc(h);
7135 if (!c) {
7136 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
7137 goto out_of_memory;
7138 }
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto out;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus == 0)
		goto out_free_cmd;
out:
	dev_warn(&h->pdev->dev, "error flushing cache on controller\n");
out_free_cmd:
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
7151}
7152
7153static void hpsa_shutdown(struct pci_dev *pdev)
7154{
7155 struct ctlr_info *h;
7156
7157 h = pci_get_drvdata(pdev);
	/* Flush the write cache so that everything in the battery-backed
	 * cache reaches the disks, then turn off board interrupts and
	 * free the interrupt resources.
	 */
7162 hpsa_flush_cache(h);
7163 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05007164 hpsa_free_irqs_and_disable_msix(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007165}
7166
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007167static void hpsa_free_device_info(struct ctlr_info *h)
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06007168{
7169 int i;
7170
7171 for (i = 0; i < h->ndevices; i++)
7172 kfree(h->dev[i]);
7173}
7174
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007175static void hpsa_remove_one(struct pci_dev *pdev)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007176{
7177 struct ctlr_info *h;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007178 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007179
7180 if (pci_get_drvdata(pdev) == NULL) {
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007181 dev_err(&pdev->dev, "unable to remove device\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007182 return;
7183 }
7184 h = pci_get_drvdata(pdev);
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007185
7186 /* Get rid of any controller monitoring work items */
7187 spin_lock_irqsave(&h->lock, flags);
7188 h->remove_in_progress = 1;
7189 cancel_delayed_work(&h->monitor_ctlr_work);
7190 spin_unlock_irqrestore(&h->lock, flags);
7191
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007192 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
7193 hpsa_shutdown(pdev);
7194 iounmap(h->vaddr);
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007195 iounmap(h->transtable);
7196 iounmap(h->cfgtable);
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06007197 hpsa_free_device_info(h);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06007198 hpsa_free_sg_chain_blocks(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007199 pci_free_consistent(h->pdev,
7200 h->nr_cmds * sizeof(struct CommandList),
7201 h->cmd_pool, h->cmd_pool_dhandle);
7202 pci_free_consistent(h->pdev,
7203 h->nr_cmds * sizeof(struct ErrorInfo),
7204 h->errinfo_pool, h->errinfo_pool_dhandle);
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007205 hpsa_free_reply_queues(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007206 kfree(h->cmd_pool_bits);
Don Brace303932f2010-02-04 08:42:40 -06007207 kfree(h->blockFetchTable);
Matt Gatese1f7de02014-02-18 13:55:17 -06007208 kfree(h->ioaccel1_blockFetchTable);
Stephen M. Cameronaca90122014-02-18 13:56:14 -06007209 kfree(h->ioaccel2_blockFetchTable);
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06007210 kfree(h->hba_inquiry_data);
Stephen M. Cameronf0bd0b682012-05-01 11:42:09 -05007211 pci_disable_device(pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007212 pci_release_regions(pdev);
Stephen M. Cameron094963d2014-05-29 10:53:18 -05007213 free_percpu(h->lockup_detected);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007214 kfree(h);
7215}
7216
7217static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7218 __attribute__((unused)) pm_message_t state)
7219{
7220 return -ENOSYS;
7221}
7222
7223static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7224{
7225 return -ENOSYS;
7226}
7227
7228static struct pci_driver hpsa_pci_driver = {
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06007229 .name = HPSA,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007230 .probe = hpsa_init_one,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007231 .remove = hpsa_remove_one,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007232 .id_table = hpsa_pci_device_id, /* id_table */
7233 .shutdown = hpsa_shutdown,
7234 .suspend = hpsa_suspend,
7235 .resume = hpsa_resume,
7236};
7237
Don Brace303932f2010-02-04 08:42:40 -06007238/* Fill in bucket_map[], given nsgs (the max number of
7239 * scatter gather elements supported) and bucket[],
7240 * which is an array of 8 integers. The bucket[] array
7241 * contains 8 different DMA transfer sizes (in 16
7242 * byte increments) which the controller uses to fetch
7243 * commands. This function fills in bucket_map[], which
7244 * maps a given number of scatter gather elements to one of
7245 * the 8 DMA transfer sizes. The point of it is to allow the
7246 * controller to only do as much DMA as needed to fetch the
7247 * command, with the DMA transfer size encoded in the lower
7248 * bits of the command address.
7249 */
7250static void calc_bucket_map(int bucket[], int num_buckets,
Matt Gatese1f7de02014-02-18 13:55:17 -06007251 int nsgs, int min_blocks, int *bucket_map)
Don Brace303932f2010-02-04 08:42:40 -06007252{
7253 int i, j, b, size;
7254
Don Brace303932f2010-02-04 08:42:40 -06007255 /* Note, bucket_map must have nsgs+1 entries. */
7256 for (i = 0; i <= nsgs; i++) {
7257 /* Compute size of a command with i SG entries */
Matt Gatese1f7de02014-02-18 13:55:17 -06007258 size = i + min_blocks;
Don Brace303932f2010-02-04 08:42:40 -06007259 b = num_buckets; /* Assume the biggest bucket */
7260 /* Find the bucket that is just big enough */
Matt Gatese1f7de02014-02-18 13:55:17 -06007261 for (j = 0; j < num_buckets; j++) {
Don Brace303932f2010-02-04 08:42:40 -06007262 if (bucket[j] >= size) {
7263 b = j;
7264 break;
7265 }
7266 }
7267 /* for a command with i SG entries, use bucket b. */
7268 bucket_map[i] = b;
7269 }
7270}
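/*
 * A minimal usage sketch (illustrative values only): with
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, 36}, num_buckets = 8 and
 * min_blocks = 4, a command with 3 SG entries needs 3 + 4 = 7 blocks;
 * the first bucket holding at least 7 is bucket[2] = 8, so
 * bucket_map[3] = 2 and the controller fetches 8 * 16 = 128 bytes.
 */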
7271
Matt Gatese1f7de02014-02-18 13:55:17 -06007272static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
Don Brace303932f2010-02-04 08:42:40 -06007273{
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007274 int i;
7275 unsigned long register_value;
Matt Gatese1f7de02014-02-18 13:55:17 -06007276 unsigned long transMethod = CFGTBL_Trans_Performant |
7277 (trans_support & CFGTBL_Trans_use_short_tags) |
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007278 CFGTBL_Trans_enable_directed_msix |
7279 (trans_support & (CFGTBL_Trans_io_accel1 |
7280 CFGTBL_Trans_io_accel2));
Matt Gatese1f7de02014-02-18 13:55:17 -06007281 struct access_method access = SA5_performant_access;
	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to tell it the 8 different
	 * command sizes it may see.  It's a way of reducing the DMA
	 * done to fetch each command.  Encoded into each command's tag
	 * are 3 bits which communicate to the controller which of the
	 * eight sizes that command fits within.  The size of each command
	 * depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are
	 * programmed with the number of 16-byte blocks a command of that
	 * size requires.  The smallest command possible requires 5 such
	 * 16-byte blocks.  The largest command possible requires
	 * SG_ENTRIES_IN_CMD + 4 16-byte blocks.  Note, this only extends
	 * to the SG entries contained within the command block, and does
	 * not extend to chained blocks of SG elements.  bft[] contains
	 * the eight values we write to the registers.  They are not
	 * evenly distributed, but have more sizes for small commands,
	 * and fewer sizes for larger commands.
	 */
Stephen M. Camerond66ae082012-01-19 14:00:48 -06007300 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007301#define MIN_IOACCEL2_BFT_ENTRY 5
7302#define HPSA_IOACCEL2_HEADER_SZ 4
7303 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7304 13, 14, 15, 16, 17, 18, 19,
7305 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7306 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7307 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7308 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7309 16 * MIN_IOACCEL2_BFT_ENTRY);
7310 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
Stephen M. Camerond66ae082012-01-19 14:00:48 -06007311 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
Don Brace303932f2010-02-04 08:42:40 -06007312 /* 5 = 1 s/g entry or 4k
7313 * 6 = 2 s/g entry or 8k
7314 * 8 = 4 s/g entry or 16k
7315 * 10 = 6 s/g entry or 24k
7316 */
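	/*
	 * For illustration (hypothetical request): a command with 2 SG
	 * entries needs 2 + 4 = 6 sixteen-byte blocks, so it maps to
	 * bft[1] = 6; the bucket index 1 is what gets encoded in the low
	 * tag bits, and the controller fetches 6 * 16 = 96 bytes.
	 */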
Don Brace303932f2010-02-04 08:42:40 -06007317
Stephen M. Cameronb3a52e72014-05-29 10:53:23 -05007318 /* If the controller supports either ioaccel method then
7319 * we can also use the RAID stack submit path that does not
7320 * perform the superfluous readl() after each command submission.
7321 */
7322 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7323 access = SA5_performant_access_no_read;
7324
Don Brace303932f2010-02-04 08:42:40 -06007325 /* Controller spec: zero out this buffer. */
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007326 for (i = 0; i < h->nreply_queues; i++)
7327 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
Don Brace303932f2010-02-04 08:42:40 -06007328
Stephen M. Camerond66ae082012-01-19 14:00:48 -06007329 bft[7] = SG_ENTRIES_IN_CMD + 4;
7330 calc_bucket_map(bft, ARRAY_SIZE(bft),
Matt Gatese1f7de02014-02-18 13:55:17 -06007331 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
Don Brace303932f2010-02-04 08:42:40 -06007332 for (i = 0; i < 8; i++)
7333 writel(bft[i], &h->transtable->BlockFetch[i]);
7334
7335 /* size of controller ring buffer */
7336 writel(h->max_commands, &h->transtable->RepQSize);
Matt Gates254f7962012-05-01 11:43:06 -05007337 writel(h->nreply_queues, &h->transtable->RepQCount);
Don Brace303932f2010-02-04 08:42:40 -06007338 writel(0, &h->transtable->RepQCtrAddrLow32);
7339 writel(0, &h->transtable->RepQCtrAddrHigh32);
Matt Gates254f7962012-05-01 11:43:06 -05007340
7341 for (i = 0; i < h->nreply_queues; i++) {
7342 writel(0, &h->transtable->RepQAddr[i].upper);
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007343 writel(h->reply_queue[i].busaddr,
Matt Gates254f7962012-05-01 11:43:06 -05007344 &h->transtable->RepQAddr[i].lower);
7345 }
7346
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007347 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Matt Gatese1f7de02014-02-18 13:55:17 -06007348 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/* Enable outbound interrupt coalescing in accelerator mode. */
7352 if (trans_support & CFGTBL_Trans_io_accel1) {
7353 access = SA5_ioaccel_mode1_access;
7354 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7355 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
Scott Teelc3497752014-02-18 13:56:34 -06007356 } else {
7357 if (trans_support & CFGTBL_Trans_io_accel2) {
7358 access = SA5_ioaccel_mode2_access;
7359 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7360 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7361 }
Matt Gatese1f7de02014-02-18 13:55:17 -06007362 }
Don Brace303932f2010-02-04 08:42:40 -06007363 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007364 hpsa_wait_for_mode_change_ack(h);
Don Brace303932f2010-02-04 08:42:40 -06007365 register_value = readl(&(h->cfgtable->TransportActive));
7366 if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into performant mode\n");
7369 return;
7370 }
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06007371 /* Change the access methods to the performant access methods */
Matt Gatese1f7de02014-02-18 13:55:17 -06007372 h->access = access;
7373 h->transMethod = transMethod;
7374
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007375 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7376 (trans_support & CFGTBL_Trans_io_accel2)))
Matt Gatese1f7de02014-02-18 13:55:17 -06007377 return;
7378
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007379 if (trans_support & CFGTBL_Trans_io_accel1) {
7380 /* Set up I/O accelerator mode */
7381 for (i = 0; i < h->nreply_queues; i++) {
7382 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7383 h->reply_queue[i].current_entry =
7384 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7385 }
7386 bft[7] = h->ioaccel_maxsg + 8;
7387 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7388 h->ioaccel1_blockFetchTable);
7389
7390 /* initialize all reply queue entries to unused */
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007391 for (i = 0; i < h->nreply_queues; i++)
7392 memset(h->reply_queue[i].head,
7393 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7394 h->reply_queue_size);
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007395
7396 /* set all the constant fields in the accelerator command
7397 * frames once at init time to save CPU cycles later.
7398 */
7399 for (i = 0; i < h->nr_cmds; i++) {
7400 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7401
7402 cp->function = IOACCEL1_FUNCTION_SCSIIO;
7403 cp->err_info = (u32) (h->errinfo_pool_dhandle +
7404 (i * sizeof(struct ErrorInfo)));
7405 cp->err_info_len = sizeof(struct ErrorInfo);
7406 cp->sgl_offset = IOACCEL1_SGLOFFSET;
7407 cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
7408 cp->timeout_sec = 0;
7409 cp->ReplyQueue = 0;
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06007410 cp->tag =
7411 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) |
7412 DIRECT_LOOKUP_BIT);
7413 cp->host_addr =
7414 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007415 (i * sizeof(struct io_accel1_cmd)));
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007416 }
7417 } else if (trans_support & CFGTBL_Trans_io_accel2) {
7418 u64 cfg_offset, cfg_base_addr_index;
7419 u32 bft2_offset, cfg_base_addr;
7420 int rc;
7421
7422 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7423 &cfg_base_addr_index, &cfg_offset);
7424 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7425 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7426 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7427 4, h->ioaccel2_blockFetchTable);
7428 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7429 BUILD_BUG_ON(offsetof(struct CfgTable,
7430 io_accel_request_size_offset) != 0xb8);
7431 h->ioaccel2_bft2_regs =
7432 remap_pci_mem(pci_resource_start(h->pdev,
7433 cfg_base_addr_index) +
7434 cfg_offset + bft2_offset,
7435 ARRAY_SIZE(bft2) *
7436 sizeof(*h->ioaccel2_bft2_regs));
7437 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7438 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
Matt Gatese1f7de02014-02-18 13:55:17 -06007439 }
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007440 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7441 hpsa_wait_for_mode_change_ack(h);
Matt Gatese1f7de02014-02-18 13:55:17 -06007442}
7443
7444static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7445{
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007446 h->ioaccel_maxsg =
7447 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7448 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7449 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7450
Matt Gatese1f7de02014-02-18 13:55:17 -06007451 /* Command structures must be aligned on a 128-byte boundary
7452 * because the 7 lower bits of the address are used by the
7453 * hardware.
7454 */
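	/*
	 * Example: a pool at bus address 0x1080 is 128-byte aligned, so
	 * its low 7 bits are zero and stay available for the hardware's
	 * per-command encoding.
	 */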
Matt Gatese1f7de02014-02-18 13:55:17 -06007455 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7456 IOACCEL1_COMMANDLIST_ALIGNMENT);
7457 h->ioaccel_cmd_pool =
7458 pci_alloc_consistent(h->pdev,
7459 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7460 &(h->ioaccel_cmd_pool_dhandle));
7461
7462 h->ioaccel1_blockFetchTable =
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007463 kmalloc(((h->ioaccel_maxsg + 1) *
Matt Gatese1f7de02014-02-18 13:55:17 -06007464 sizeof(u32)), GFP_KERNEL);
7465
7466 if ((h->ioaccel_cmd_pool == NULL) ||
7467 (h->ioaccel1_blockFetchTable == NULL))
7468 goto clean_up;
7469
7470 memset(h->ioaccel_cmd_pool, 0,
7471 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7472 return 0;
7473
7474clean_up:
7475 if (h->ioaccel_cmd_pool)
7476 pci_free_consistent(h->pdev,
7477 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7478 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7479 kfree(h->ioaccel1_blockFetchTable);
7480 return 1;
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007481}
7482
Stephen M. Cameronaca90122014-02-18 13:56:14 -06007483static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7484{
7485 /* Allocate ioaccel2 mode command blocks and block fetch table */
7486
7487 h->ioaccel_maxsg =
7488 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7489 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7490 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7491
Stephen M. Cameronaca90122014-02-18 13:56:14 -06007492 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7493 IOACCEL2_COMMANDLIST_ALIGNMENT);
7494 h->ioaccel2_cmd_pool =
7495 pci_alloc_consistent(h->pdev,
7496 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7497 &(h->ioaccel2_cmd_pool_dhandle));
7498
7499 h->ioaccel2_blockFetchTable =
7500 kmalloc(((h->ioaccel_maxsg + 1) *
7501 sizeof(u32)), GFP_KERNEL);
7502
7503 if ((h->ioaccel2_cmd_pool == NULL) ||
7504 (h->ioaccel2_blockFetchTable == NULL))
7505 goto clean_up;
7506
7507 memset(h->ioaccel2_cmd_pool, 0,
7508 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7509 return 0;
7510
7511clean_up:
7512 if (h->ioaccel2_cmd_pool)
7513 pci_free_consistent(h->pdev,
7514 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7515 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7516 kfree(h->ioaccel2_blockFetchTable);
7517 return 1;
7518}
7519
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007520static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007521{
7522 u32 trans_support;
Matt Gatese1f7de02014-02-18 13:55:17 -06007523 unsigned long transMethod = CFGTBL_Trans_Performant |
7524 CFGTBL_Trans_use_short_tags;
Matt Gates254f7962012-05-01 11:43:06 -05007525 int i;
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007526
Stephen M. Cameron02ec19c2011-01-06 14:48:29 -06007527 if (hpsa_simple_mode)
7528 return;
7529
scameron@beardog.cce.hp.com67c99a72014-04-14 14:01:09 -05007530 trans_support = readl(&(h->cfgtable->TransportSupport));
7531 if (!(trans_support & PERFORMANT_MODE))
7532 return;
7533
Matt Gatese1f7de02014-02-18 13:55:17 -06007534 /* Check for I/O accelerator mode support */
7535 if (trans_support & CFGTBL_Trans_io_accel1) {
7536 transMethod |= CFGTBL_Trans_io_accel1 |
7537 CFGTBL_Trans_enable_directed_msix;
7538 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7539 goto clean_up;
Stephen M. Cameronaca90122014-02-18 13:56:14 -06007540 } else {
7541 if (trans_support & CFGTBL_Trans_io_accel2) {
7542 transMethod |= CFGTBL_Trans_io_accel2 |
7543 CFGTBL_Trans_enable_directed_msix;
7544 if (ioaccel2_alloc_cmds_and_bft(h))
7545 goto clean_up;
7546 }
Matt Gatese1f7de02014-02-18 13:55:17 -06007547 }
7548
Hannes Reineckeeee0f032014-01-15 13:30:53 +01007549 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05007550 hpsa_get_max_perf_mode_cmds(h);
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007551 /* Performant mode ring buffer and supporting data structures */
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007552 h->reply_queue_size = h->max_commands * sizeof(u64);
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007553
Matt Gates254f7962012-05-01 11:43:06 -05007554 for (i = 0; i < h->nreply_queues; i++) {
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007555 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7556 h->reply_queue_size,
7557 &(h->reply_queue[i].busaddr));
7558 if (!h->reply_queue[i].head)
7559 goto clean_up;
Matt Gates254f7962012-05-01 11:43:06 -05007560 h->reply_queue[i].size = h->max_commands;
7561 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
7562 h->reply_queue[i].current_entry = 0;
7563 }
7564
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007565 /* Need a block fetch table for performant mode */
Stephen M. Camerond66ae082012-01-19 14:00:48 -06007566 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007567 sizeof(u32)), GFP_KERNEL);
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007568 if (!h->blockFetchTable)
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007569 goto clean_up;
7570
Matt Gatese1f7de02014-02-18 13:55:17 -06007571 hpsa_enter_performant_mode(h, trans_support);
Don Brace303932f2010-02-04 08:42:40 -06007572 return;
7573
7574clean_up:
Stephen M. Cameron072b0512014-05-29 10:53:07 -05007575 hpsa_free_reply_queues(h);
Don Brace303932f2010-02-04 08:42:40 -06007576 kfree(h->blockFetchTable);
7577}
7578
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007579static int is_accelerated_cmd(struct CommandList *c)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007580{
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007581 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7582}
7583
7584static void hpsa_drain_accel_commands(struct ctlr_info *h)
7585{
7586 struct CommandList *c = NULL;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007587 unsigned long flags;
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007588 int accel_cmds_out;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007589
7590 do { /* wait for all outstanding commands to drain out */
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007591 accel_cmds_out = 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007592 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007593 list_for_each_entry(c, &h->cmpQ, list)
7594 accel_cmds_out += is_accelerated_cmd(c);
7595 list_for_each_entry(c, &h->reqQ, list)
7596 accel_cmds_out += is_accelerated_cmd(c);
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007597 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007598 if (accel_cmds_out <= 0)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007599 break;
7600 msleep(100);
7601 } while (1);
7602}
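/*
 * Note on the drain above: callers first quiesce the host with
 * scsi_block_requests(), so no new accelerated commands can be queued
 * and the polled count can only fall.
 */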
7603
/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
7608static int __init hpsa_init(void)
7609{
Mike Miller31468402010-02-25 14:03:12 -06007610 return pci_register_driver(&hpsa_pci_driver);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007611}
7612
7613static void __exit hpsa_cleanup(void)
7614{
7615 pci_unregister_driver(&hpsa_pci_driver);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007616}
7617
Matt Gatese1f7de02014-02-18 13:55:17 -06007618static void __attribute__((unused)) verify_offsets(void)
7619{
7620#define VERIFY_OFFSET(member, offset) \
Scott Teeldd0e19f2014-02-18 13:57:31 -06007621 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
7622
7623 VERIFY_OFFSET(structure_size, 0);
7624 VERIFY_OFFSET(volume_blk_size, 4);
7625 VERIFY_OFFSET(volume_blk_cnt, 8);
7626 VERIFY_OFFSET(phys_blk_shift, 16);
7627 VERIFY_OFFSET(parity_rotation_shift, 17);
7628 VERIFY_OFFSET(strip_size, 18);
7629 VERIFY_OFFSET(disk_starting_blk, 20);
7630 VERIFY_OFFSET(disk_blk_cnt, 28);
7631 VERIFY_OFFSET(data_disks_per_row, 36);
7632 VERIFY_OFFSET(metadata_disks_per_row, 38);
7633 VERIFY_OFFSET(row_cnt, 40);
7634 VERIFY_OFFSET(layout_map_count, 42);
7635 VERIFY_OFFSET(flags, 44);
7636 VERIFY_OFFSET(dekindex, 46);
7637 /* VERIFY_OFFSET(reserved, 48 */
7638 VERIFY_OFFSET(data, 64);
7639
7640#undef VERIFY_OFFSET
7641
7642#define VERIFY_OFFSET(member, offset) \
Mike Millerb66cc252014-02-18 13:56:04 -06007643 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7644
7645 VERIFY_OFFSET(IU_type, 0);
7646 VERIFY_OFFSET(direction, 1);
7647 VERIFY_OFFSET(reply_queue, 2);
7648 /* VERIFY_OFFSET(reserved1, 3); */
7649 VERIFY_OFFSET(scsi_nexus, 4);
7650 VERIFY_OFFSET(Tag, 8);
7651 VERIFY_OFFSET(cdb, 16);
7652 VERIFY_OFFSET(cciss_lun, 32);
7653 VERIFY_OFFSET(data_len, 40);
7654 VERIFY_OFFSET(cmd_priority_task_attr, 44);
7655 VERIFY_OFFSET(sg_count, 45);
7656 /* VERIFY_OFFSET(reserved3 */
7657 VERIFY_OFFSET(err_ptr, 48);
7658 VERIFY_OFFSET(err_len, 56);
7659 /* VERIFY_OFFSET(reserved4 */
7660 VERIFY_OFFSET(sg, 64);
7661
7662#undef VERIFY_OFFSET
7663
7664#define VERIFY_OFFSET(member, offset) \
Matt Gatese1f7de02014-02-18 13:55:17 -06007665 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7666
7667 VERIFY_OFFSET(dev_handle, 0x00);
7668 VERIFY_OFFSET(reserved1, 0x02);
7669 VERIFY_OFFSET(function, 0x03);
7670 VERIFY_OFFSET(reserved2, 0x04);
7671 VERIFY_OFFSET(err_info, 0x0C);
7672 VERIFY_OFFSET(reserved3, 0x10);
7673 VERIFY_OFFSET(err_info_len, 0x12);
7674 VERIFY_OFFSET(reserved4, 0x13);
7675 VERIFY_OFFSET(sgl_offset, 0x14);
7676 VERIFY_OFFSET(reserved5, 0x15);
7677 VERIFY_OFFSET(transfer_len, 0x1C);
7678 VERIFY_OFFSET(reserved6, 0x20);
7679 VERIFY_OFFSET(io_flags, 0x24);
7680 VERIFY_OFFSET(reserved7, 0x26);
7681 VERIFY_OFFSET(LUN, 0x34);
7682 VERIFY_OFFSET(control, 0x3C);
7683 VERIFY_OFFSET(CDB, 0x40);
7684 VERIFY_OFFSET(reserved8, 0x50);
7685 VERIFY_OFFSET(host_context_flags, 0x60);
7686 VERIFY_OFFSET(timeout_sec, 0x62);
7687 VERIFY_OFFSET(ReplyQueue, 0x64);
7688 VERIFY_OFFSET(reserved9, 0x65);
Stephen M. Cameron50a0dec2014-11-14 17:26:59 -06007689 VERIFY_OFFSET(tag, 0x68);
Matt Gatese1f7de02014-02-18 13:55:17 -06007690 VERIFY_OFFSET(host_addr, 0x70);
7691 VERIFY_OFFSET(CISS_LUN, 0x78);
7692 VERIFY_OFFSET(SG, 0x78 + 8);
7693#undef VERIFY_OFFSET
7694}
7695
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007696module_init(hpsa_init);
7697module_exit(hpsa_cleanup);