/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	unsigned long (*fifo_full)(struct ctlr_info *h);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
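/*
 * Each board type supplies one of these register-access method tables
 * (SA5_access, SA5_performant_access, SA5_ioaccel_mode1_access below);
 * the driver invokes them through the copy embedded in struct ctlr_info,
 * e.g. h->access.submit_command(h, c) or
 * h->access.set_intr_mask(h, HPSA_INTR_ON).
 */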

struct hpsa_scsi_dev_t {
	int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	u32 ioaccel_handle;
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

};

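/*
 * Per-queue completion ring bookkeeping.  In performant mode an entry is
 * treated as valid when its low-order bit matches "wraparound", which is
 * toggled each time current_entry wraps back to zero (see
 * SA5_performant_completed() below); the ioaccel mode 1 rings instead mark
 * consumed slots with IOACCEL_MODE1_REPLY_UNUSED.
 */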
struct reply_pool {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
};

struct ctlr_info {
	int	ctlr;
	char	devname[8];
	char	*product_name;
	struct pci_dev *pdev;
	u32	board_id;
	void __iomem *vaddr;
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	major;
	int	max_commands;
	int	commands_outstanding;
	int	max_outstanding; /* Debug */
	int	usage_count;  /* number of opens on all minor devices */
# define PERF_MODE_INT		0
# define DOORBELL_INT		1
# define SIMPLE_MODE_INT	2
# define MEMQ_MODE_INT		3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* request and completion queue info */
	struct list_head reqQ;
	struct list_head cmpQ;
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	int			scan_finished;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
	spinlock_t passthru_count_lock; /* protects passthru_count */
	int passthru_count;

	/*
	 * Performant mode completion buffers
	 */
	u64 *reply_pool;
	size_t reply_pool_size;
	struct reply_pool reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	dma_addr_t reply_pool_dhandle;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 lockup_detected;
	struct delayed_work monitor_ctlr_work;
	int remove_in_progress;
	u32 fifo_recently_full;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
	u32 events;
};
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
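/*
 * With the values above a driver-initiated message would be retried up to
 * 10 times, 10 seconds apart, i.e. roughly 100 seconds worst case (assuming
 * the retry loop in hpsa.c honors both constants).
 */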

/* Maximum time in seconds the driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
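/*
 * Worst case that works out to roughly HPSA_TUR_RETRY_LIMIT *
 * HPSA_MAX_WAIT_INTERVAL_SECS = 20 * 30 = 600 seconds of waiting for a
 * single device to report ready.
 */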

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
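/*
 * With the defaults above: HPSA_BOARD_READY_ITERATIONS =
 * (120 * 1000) / 100 = 1200 polls and HPSA_BOARD_NOT_READY_ITERATIONS =
 * (100 * 1000) / 100 = 1000 polls, each HZ/10 ticks (100 ms) apart.
 */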
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Defining the different access_methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL			0x20
#define SA5_REQUEST_PORT_OFFSET		0x40
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS			0x30
#define SA5_SCRATCHPAD_OFFSET		0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING		0x04
#define SA5_PERF_INTR_OFF		0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR			0xA0
#define SA5_OUTDB_STATUS		0x9C


#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

/*
 * Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
		c->Header.Tag.lower);
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
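	/* A dummy read presumably forces the posted write of the command
	 * address out to the controller, mirroring the "read to flush"
	 * pattern used by the performant-mode helpers below.
	 */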
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

/*
 * This card is the opposite of the other cards.
 *  0 turns interrupts on...
 *  0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags, register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (!(h->msi_vector || h->msix_vector)) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 * Returns true if fifo is full.
 */
static unsigned long SA5_fifo_full(struct ctlr_info *h)
{
	return (h->commands_outstanding >= h->max_commands);
}

/*
 * returns value read from hardware.
 * returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
	unsigned long flags;

	if (register_value != FIFO_EMPTY) {
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	}

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	if (h->msi_vector || h->msix_vector)
		return true;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT	0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX	0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX	0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX	0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED	0xFFFFFFFFFFFFFFFFULL

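/*
 * The consumer index register takes the reply queue number in its top byte
 * and the new ring index in the low bits; see the
 * writel((q << 24) | rq->current_entry, ...) in
 * SA5_ioaccel_mode1_completed() below.
 */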
static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
			IOACCEL_MODE1_CONSUMER_INDEX);
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_fifo_full,
	SA5_intr_pending,
	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

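/*
 * Ties a PCI board id to its product name and the access_method table used
 * to drive that board (consulted by the board lookup table in hpsa.c).
 */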
struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};

#endif /* HPSA_H */
