/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
 * was acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

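/* PCIe link speed values reported by the card: 2.5/5/8 GT/s (Gen1/2/3). */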
enum {
	STEC_LINK_2_5GTS = 0,
	STEC_LINK_5GTS = 1,
	STEC_LINK_8GTS = 2,
	STEC_LINK_UNKNOWN = 0xFF
};

enum {
	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
};

#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID

MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);

#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001

#define SKD_FUA_NV (1 << 1)
#define SKD_MINORS_PER_DEVICE 16

#define SKD_MAX_QUEUE_DEPTH 200u

#define SKD_PAUSE_TIMEOUT (5 * 1000)

#define SKD_N_FITMSG_BYTES (512u)

#define SKD_N_SPECIAL_CONTEXT 32u
#define SKD_N_SPECIAL_FITMSG_BYTES (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
#define SKD_N_SG_PER_SPECIAL 256u

#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)

#define SKD_N_INTERNAL_BYTES (512u)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu

#define SKD_N_TIMEOUT_SLOT 4u
#define SKD_TIMEOUT_SLOT_MASK 3u

#define SKD_N_MAX_SECTORS 2048u

#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36

enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_DRAINING_TIMEOUT,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u

enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
	SKD_REQ_STATE_ABORTED,
};

enum skd_fit_msg_state {
	SKD_MSG_STATE_IDLE,
	SKD_MSG_STATE_BUSY,
};

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_fitmsg_context {
	enum skd_fit_msg_state state;

	struct skd_fitmsg_context *next;

	u32 id;
	u16 outstanding;

	u32 length;
	u32 offset;

	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_request_context {
	enum skd_req_state state;

	struct skd_request_context *next;

	u16 id;
	u32 fitmsg_id;

	struct request *req;
	u8 flush_cmd;

	u32 timeout_stamp;
	u8 sg_data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;

};
#define SKD_DATA_DIR_HOST_TO_CARD 1
#define SKD_DATA_DIR_CARD_TO_HOST 2

struct skd_special_context {
	struct skd_request_context req;

	u8 orphaned;

	void *data_buf;
	dma_addr_t db_dma_address;

	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_sg_io {
	fmode_t mode;
	void __user *argp;

	struct sg_io_hdr sg;

	u8 cdb[16];

	u32 dxfer_len;
	u32 iovcnt;
	struct sg_iovec *iov;
	struct sg_iovec no_iov_iov;

	struct skd_special_context *skspcl;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2

struct skd_device {
	volatile void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	u32 devno;
	u32 major;
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	u32 in_flight;
	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
	u32 timeout_stamp;
	struct skd_fitmsg_context *skmsg_free_list;
	struct skd_fitmsg_context *skmsg_table;

	struct skd_request_context *skreq_free_list;
	struct skd_request_context *skreq_table;

	struct skd_special_context *skspcl_free_list;
	struct skd_special_context *skspcl_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13]; /* 12 chars plus null term */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int n_special;
	int sgs_per_request;
	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;
	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct completion_worker;
};

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
			val);
}


#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-14, default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
		struct skd_request_context *skreq, blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
				struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev, int requeue);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	for (;;) {
		req = blk_peek_request(q);
		if (req == NULL)
			break;
		blk_start_request(req);
		__blk_end_request_all(req, BLK_STS_IOERR);
	}
}

static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = 0x28;
	else
		scsi_req->cdb[0] = 0x2a;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = 0x35;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}

static void skd_request_fn_not_online(struct request_queue *q);

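/*
 * Legacy request_fn entry point. Pulls requests off the block layer queue,
 * encodes each one as a SoFIT SCSI command inside a FIT message, and sends
 * the message to the card once it fills up (or when the queue drains).
 */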
static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	unsigned long io_flags;
	u32 lba;
	u32 count;
	int data_dir;
	u64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	void *cmd_ptr;
	int flush, fua;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_request_fn_not_online(q);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->skmsg_free_list == NULL ||
		    skdev->skreq_free_list == NULL ||
		    skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {

		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (req_op(req) == REQ_OP_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		dev_dbg(&skdev->pdev->dev,
			"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
				skdev->in_flight, skdev->cur_max_queue_depth);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			dev_dbg(&skdev->pdev->dev, "Out of req=%p\n", q);
			break;
		}
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skmsg == NULL) {
			if (skdev->skmsg_free_list == NULL) {
				dev_dbg(&skdev->pdev->dev, "Out of msg\n");
				break;
			}
		}

		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		blk_start_request(req);
		skreq->req = req;
		skreq->fitmsg_id = 0;

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				dev_dbg(&skdev->pdev->dev,
					"Out of msg skdev=%p\n",
					skdev);
				break;
			}
			SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		/*
		 * Note that a FIT msg may have just been started
		 * but contains no SoFIT requests yet.
		 */

		/*
		 * Transcode the request, checking as we go. The outcome of
		 * the transcoding is represented by the error variable.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		memset(cmd_ptr, 0, 32);

		be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (data_dir == READ)
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		else
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

		if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);
		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		if (!req->bio)
			goto skip_sg;

		if (!skd_preop_sg_list(skdev, skreq)) {
			/*
			 * Complete the native request with error.
			 * Note that the request context is still at the
			 * head of the free list, and that the SoFIT request
			 * was encoded into the FIT msg buffer but the FIT
			 * msg length has not been updated. In short, the
			 * only resource that has been allocated but might
			 * not be used is that the FIT msg could be empty.
			 */
			dev_dbg(&skdev->pdev->dev, "error Out\n");
			skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
			continue;
		}

skip_sg:
		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skdev->skreq_free_list = skreq->next;
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;
		skdev->in_flight++;
		dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
			skdev->in_flight);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		/* Bigger than just a FIT msg header? */
		if (skmsg->length > sizeof(struct fit_msg_hdr)) {
			dev_dbg(&skdev->pdev->dev, "sending msg=%p, len %d\n",
				skmsg, skmsg->length);
			skd_send_fitmsg(skdev, skmsg);
		} else {
			/*
			 * The FIT msg is empty. It means we got started
			 * on the msg, but the requests were rejected.
			 */
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}

static void skd_end_request(struct skd_device *skdev,
		struct skd_request_context *skreq, blk_status_t error)
{
	if (unlikely(error)) {
		struct request *req = skreq->req;
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_err(&skdev->pdev->dev,
			"Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
			count, skreq->id);
	} else
		dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", skreq->id,
			error);

	__blk_end_request_all(skreq->req, error);
}

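/*
 * Build the FIT SG descriptor list for a block request: map the bio into a
 * scatterlist, DMA-map it, and mirror each entry into skreq->sksg_list.
 * Returns false if mapping produced no usable entries.
 */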
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
	struct scatterlist *sg = &skreq->sg[0];
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	/* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
		     skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

	n_sg = blk_rq_map_sg(skdev->queue, req, sg);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for (i = 0; i < n_sg; i++) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(&sg[i]);
		uint64_t dma_addr = sg_dma_address(&sg[i]);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				" sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}

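/* Undo skd_preop_sg_list(): restore the descriptor chain and unmap the SG list. */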
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}

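/*
 * Called from skd_request_fn() when the drive is not online: either leave
 * requests queued (transient states) or fail everything that is pending.
 */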
static void skd_request_fn_not_online(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/* In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the dev/skd0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do??? delay dev/skd0 ??
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}

	/* If we get here, terminate all pending block requests
	 * with EIO and any scsi pass thru with appropriate sense
	 */

	skd_fail_all_pending(skdev);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

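/*
 * One-second housekeeping timer: picks up firmware state changes and, when
 * a timeout slot still has requests outstanding from its previous use,
 * stops the queue and switches to the draining-timeout state.
 */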
static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;

	u32 timo_slot;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}
	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * over 7 seconds ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0)
		goto timer_func_out;

	/* Something is overdue */
	dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
		skdev->timeout_slot[timo_slot], skdev->in_flight);
	dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
		skdev->timeout_slot[timo_slot], skdev->in_flight);

	skdev->timer_countdown = SKD_DRAINING_TIMO;
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;
	blk_stop_queue(skdev->queue);

timer_func_out:
	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}

static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev, 0);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timedout=%d, restarting device.",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		dev_dbg(&skdev->pdev->dev,
			"draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
			skdev->timo_slot, skdev->timer_countdown,
			skdev->in_flight,
			skdev->timeout_slot[skdev->timo_slot]);
		/* if the slot has cleared we can let the I/O continue */
		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
			dev_dbg(&skdev->pdev->dev,
				"Slot drained, starting queue.\n");
			skdev->state = SKD_DRVR_STATE_ONLINE;
			blk_start_queue(skdev->queue);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev, 0);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev, 0);
		}

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}

static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	init_timer(&skdev->timer);
	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}

/*
 *****************************************************************************
 * IOCTL
 *****************************************************************************
 */
static int skd_ioctl_sg_io(struct skd_device *skdev,
			   fmode_t mode, void __user *argp);
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
					struct skd_sg_io *sksgio);
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
				   struct skd_sg_io *sksgio);
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
				    struct skd_sg_io *sksgio);
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
				 struct skd_sg_io *sksgio, int dxfer_dir);
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
				 struct skd_sg_io *sksgio);
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
static int skd_sg_io_release_skspcl(struct skd_device *skdev,
				    struct skd_sg_io *sksgio);
static int skd_sg_io_put_status(struct skd_device *skdev,
				struct skd_sg_io *sksgio);

static void skd_complete_special(struct skd_device *skdev,
				 volatile struct fit_completion_entry_v1
				 *skcomp,
				 volatile struct fit_comp_error_info *skerr,
				 struct skd_special_context *skspcl);

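/* Block device ioctl entry point; only the SG_* pass-through ioctls are supported. */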
static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
			  uint cmd_in, ulong arg)
{
	static const int sg_version_num = 30527;
	int rc = 0, timeout;
	struct gendisk *disk = bdev->bd_disk;
	struct skd_device *skdev = disk->private_data;
	int __user *p = (int __user *)arg;

	dev_dbg(&skdev->pdev->dev,
		"%s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
		disk->disk_name, current->comm, mode, cmd_in, arg);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd_in) {
	case SG_SET_TIMEOUT:
		rc = get_user(timeout, p);
		if (!rc)
			disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
		break;
	case SG_GET_TIMEOUT:
		rc = jiffies_to_clock_t(disk->queue->sg_timeout);
		break;
	case SG_GET_VERSION_NUM:
		rc = put_user(sg_version_num, p);
		break;
	case SG_IO:
		rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	dev_dbg(&skdev->pdev->dev, "%s: completion rc %d\n", disk->disk_name,
		rc);
	return rc;
}

static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
			   void __user *argp)
{
	int rc;
	struct skd_sg_io sksgio;

	memset(&sksgio, 0, sizeof(sksgio));
	sksgio.mode = mode;
	sksgio.argp = argp;
	sksgio.iov = &sksgio.no_iov_iov;

	switch (skdev->state) {
	case SKD_DRVR_STATE_ONLINE:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		break;

	default:
		dev_dbg(&skdev->pdev->dev, "drive not online\n");
		rc = -ENXIO;
		goto out;
	}

	rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_prep_buffering(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
	if (rc)
		goto out;

	rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_await(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
	if (rc)
		goto out;

	rc = skd_sg_io_put_status(skdev, &sksgio);
	if (rc)
		goto out;

	rc = 0;

out:
	skd_sg_io_release_skspcl(skdev, &sksgio);

	if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
		kfree(sksgio.iov);
	return rc;
}

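/*
 * Copy the sg_io_hdr from user space and validate it: interface id, CDB
 * length, iovec count, transfer length, and access permissions on the
 * user buffers described by the iovec list.
 */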
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
					struct skd_sg_io *sksgio)
{
	struct sg_io_hdr *sgp = &sksgio->sg;
	int i, __maybe_unused acc;

	if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
		dev_dbg(&skdev->pdev->dev, "access sg failed %p\n",
			sksgio->argp);
		return -EFAULT;
	}

	if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
		dev_dbg(&skdev->pdev->dev, "copy_from_user sg failed %p\n",
			sksgio->argp);
		return -EFAULT;
	}

	if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
		dev_dbg(&skdev->pdev->dev, "interface_id invalid 0x%x\n",
			sgp->interface_id);
		return -EINVAL;
	}

	if (sgp->cmd_len > sizeof(sksgio->cdb)) {
		dev_dbg(&skdev->pdev->dev, "cmd_len invalid %d\n",
			sgp->cmd_len);
		return -EINVAL;
	}

	if (sgp->iovec_count > 256) {
		dev_dbg(&skdev->pdev->dev, "iovec_count invalid %d\n",
			sgp->iovec_count);
		return -EINVAL;
	}

	if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
		dev_dbg(&skdev->pdev->dev, "dxfer_len invalid %d\n",
			sgp->dxfer_len);
		return -EINVAL;
	}

	switch (sgp->dxfer_direction) {
	case SG_DXFER_NONE:
		acc = -1;
		break;

	case SG_DXFER_TO_DEV:
		acc = VERIFY_READ;
		break;

	case SG_DXFER_FROM_DEV:
	case SG_DXFER_TO_FROM_DEV:
		acc = VERIFY_WRITE;
		break;

	default:
		dev_dbg(&skdev->pdev->dev, "dxfer_dir invalid %d\n",
			sgp->dxfer_direction);
		return -EINVAL;
	}

	if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
		dev_dbg(&skdev->pdev->dev, "copy_from_user cmdp failed %p\n",
			sgp->cmdp);
		return -EFAULT;
	}

	if (sgp->mx_sb_len != 0) {
		if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
			dev_dbg(&skdev->pdev->dev, "access sbp failed %p\n",
				sgp->sbp);
			return -EFAULT;
		}
	}

	if (sgp->iovec_count == 0) {
		sksgio->iov[0].iov_base = sgp->dxferp;
		sksgio->iov[0].iov_len = sgp->dxfer_len;
		sksgio->iovcnt = 1;
		sksgio->dxfer_len = sgp->dxfer_len;
	} else {
		struct sg_iovec *iov;
		uint nbytes = sizeof(*iov) * sgp->iovec_count;
		size_t iov_data_len;

		iov = kmalloc(nbytes, GFP_KERNEL);
		if (iov == NULL) {
			dev_dbg(&skdev->pdev->dev, "alloc iovec failed %d\n",
				sgp->iovec_count);
			return -ENOMEM;
		}
		sksgio->iov = iov;
		sksgio->iovcnt = sgp->iovec_count;

		if (copy_from_user(iov, sgp->dxferp, nbytes)) {
			dev_dbg(&skdev->pdev->dev,
				"copy_from_user iovec failed %p\n",
				sgp->dxferp);
			return -EFAULT;
		}

		/*
		 * Sum up the vecs, making sure they don't overflow
		 */
		iov_data_len = 0;
		for (i = 0; i < sgp->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len)
				return -EINVAL;
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (sgp->dxfer_len < iov_data_len) {
			sksgio->iovcnt = iov_shorten((struct iovec *)iov,
						     sgp->iovec_count,
						     sgp->dxfer_len);
			sksgio->dxfer_len = sgp->dxfer_len;
		} else
			sksgio->dxfer_len = iov_data_len;
	}

	if (sgp->dxfer_direction != SG_DXFER_NONE) {
		struct sg_iovec *iov = sksgio->iov;
		for (i = 0; i < sksgio->iovcnt; i++, iov++) {
			if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
				dev_dbg(&skdev->pdev->dev,
					"access data failed %p/%zd\n",
					iov->iov_base, iov->iov_len);
				return -EFAULT;
			}
		}
	}

	return 0;
}

static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
				   struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = NULL;
	int rc;

	for (;;) {
		ulong flags;

		spin_lock_irqsave(&skdev->lock, flags);
		skspcl = skdev->skspcl_free_list;
		if (skspcl != NULL) {
			skdev->skspcl_free_list =
				(struct skd_special_context *)skspcl->req.next;
			skspcl->req.id += SKD_ID_INCR;
			skspcl->req.state = SKD_REQ_STATE_SETUP;
			skspcl->orphaned = 0;
			skspcl->req.n_sg = 0;
		}
		spin_unlock_irqrestore(&skdev->lock, flags);

		if (skspcl != NULL) {
			rc = 0;
			break;
		}

		dev_dbg(&skdev->pdev->dev, "blocking\n");

		rc = wait_event_interruptible_timeout(
				skdev->waitq,
				(skdev->skspcl_free_list != NULL),
				msecs_to_jiffies(sksgio->sg.timeout));

		dev_dbg(&skdev->pdev->dev, "unblocking, rc=%d\n", rc);

		if (rc <= 0) {
			if (rc == 0)
				rc = -ETIMEDOUT;
			else
				rc = -EINTR;
			break;
		}
		/*
		 * If we get here rc > 0 meaning the timeout to
		 * wait_event_interruptible_timeout() had time left, hence the
		 * sought event -- non-empty free list -- happened.
		 * Retry the allocation.
		 */
	}
	sksgio->skspcl = skspcl;

	return rc;
}

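/*
 * Allocate a page-sized bounce buffer for each PAGE_SIZE chunk of the
 * SG_IO transfer and chain them through the request's FIT SG descriptors.
 */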
static int skd_skreq_prep_buffering(struct skd_device *skdev,
				    struct skd_request_context *skreq,
				    u32 dxfer_len)
{
	u32 resid = dxfer_len;

	/*
	 * The DMA engine must have aligned addresses and byte counts.
	 */
	resid += (-resid) & 3;
	skreq->sg_byte_count = resid;

	skreq->n_sg = 0;

	while (resid > 0) {
		u32 nbytes = PAGE_SIZE;
		u32 ix = skreq->n_sg;
		struct scatterlist *sg = &skreq->sg[ix];
		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
		struct page *page;

		if (nbytes > resid)
			nbytes = resid;

		page = alloc_page(GFP_KERNEL);
		if (page == NULL)
			return -ENOMEM;

		sg_set_page(sg, page, nbytes, 0);

		/* TODO: This should be going through a pci_???()
		 * routine to do proper mapping. */
		sksg->control = FIT_SGD_CONTROL_NOT_LAST;
		sksg->byte_count = nbytes;

		sksg->host_side_addr = sg_phys(sg);

		sksg->dev_side_addr = 0;
		sksg->next_desc_ptr = skreq->sksg_dma_address +
				      (ix + 1) * sizeof(*sksg);

		skreq->n_sg++;
		resid -= nbytes;
	}

	if (skreq->n_sg > 0) {
		u32 ix = skreq->n_sg - 1;
		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];

		sksg->control = FIT_SGD_CONTROL_LAST;
		sksg->next_desc_ptr = 0;
	}

	if (unlikely(skdev->dbg_level > 1)) {
		u32 i;

		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < skreq->n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				" sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return 0;
}

static int skd_sg_io_prep_buffering(struct skd_device *skdev,
				    struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = sksgio->skspcl;
	struct skd_request_context *skreq = &skspcl->req;
	u32 dxfer_len = sksgio->dxfer_len;
	int rc;

	rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
	/*
	 * Eventually, errors or not, skd_release_special() is called
	 * to recover allocations including partial allocations.
	 */
	return rc;
}

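/*
 * Copy SG_IO payload between the user iovecs and the bounce pages set up
 * by skd_skreq_prep_buffering(), in the direction given by dxfer_dir.
 */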
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
				 struct skd_sg_io *sksgio, int dxfer_dir)
{
	struct skd_special_context *skspcl = sksgio->skspcl;
	u32 iov_ix = 0;
	struct sg_iovec curiov;
	u32 sksg_ix = 0;
	u8 *bufp = NULL;
	u32 buf_len = 0;
	u32 resid = sksgio->dxfer_len;
	int rc;

	curiov.iov_len = 0;
	curiov.iov_base = NULL;

	if (dxfer_dir != sksgio->sg.dxfer_direction) {
		if (dxfer_dir != SG_DXFER_TO_DEV ||
		    sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
			return 0;
	}

	while (resid > 0) {
		u32 nbytes = PAGE_SIZE;

		if (curiov.iov_len == 0) {
			curiov = sksgio->iov[iov_ix++];
			continue;
		}

		if (buf_len == 0) {
			struct page *page;
			page = sg_page(&skspcl->req.sg[sksg_ix++]);
			bufp = page_address(page);
			buf_len = PAGE_SIZE;
		}

		nbytes = min_t(u32, nbytes, resid);
		nbytes = min_t(u32, nbytes, curiov.iov_len);
		nbytes = min_t(u32, nbytes, buf_len);

		if (dxfer_dir == SG_DXFER_TO_DEV)
			rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
		else
			rc = __copy_to_user(curiov.iov_base, bufp, nbytes);

		if (rc)
			return -EFAULT;

		resid -= nbytes;
		curiov.iov_len -= nbytes;
		curiov.iov_base += nbytes;
		buf_len -= nbytes;
	}

	return 0;
}

static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
				 struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = sksgio->skspcl;
	struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
	struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];

	memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);

	/* Initialize the FIT msg header */
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	/* Initialize the SCSI request */
	if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
		scsi_req->hdr.sg_list_dma_address =
			cpu_to_be64(skspcl->req.sksg_dma_address);
	scsi_req->hdr.tag = skspcl->req.id;
	scsi_req->hdr.sg_list_len_bytes =
		cpu_to_be32(skspcl->req.sg_byte_count);
	memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));

	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skd_send_special_fitmsg(skdev, skspcl);

	return 0;
}

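/*
 * Wait for the special request to leave the BUSY state. On abort, fabricate
 * a CHECK CONDITION completion; on timeout or signal, orphan the request so
 * it is released when the hardware eventually completes it.
 */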
1614static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1615{
1616 unsigned long flags;
1617 int rc;
1618
1619 rc = wait_event_interruptible_timeout(skdev->waitq,
1620 (sksgio->skspcl->req.state !=
1621 SKD_REQ_STATE_BUSY),
1622 msecs_to_jiffies(sksgio->sg.
1623 timeout));
1624
1625 spin_lock_irqsave(&skdev->lock, flags);
1626
1627 if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001628 dev_dbg(&skdev->pdev->dev, "skspcl %p aborted\n",
1629 sksgio->skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001630
1631 /* Build check cond, sense and let command finish. */
1632 /* For a timeout, we must fabricate completion and sense
1633 * data to complete the command */
1634 sksgio->skspcl->req.completion.status =
1635 SAM_STAT_CHECK_CONDITION;
1636
1637 memset(&sksgio->skspcl->req.err_info, 0,
1638 sizeof(sksgio->skspcl->req.err_info));
1639 sksgio->skspcl->req.err_info.type = 0x70;
1640 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1641 sksgio->skspcl->req.err_info.code = 0x44;
1642 sksgio->skspcl->req.err_info.qual = 0;
1643 rc = 0;
1644 } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1645 /* No longer on the adapter. We finish. */
1646 rc = 0;
1647 else {
1648 /* Something's gone wrong. Still busy. Timeout or
1649 * user interrupted (control-C). Mark as an orphan
 1650		 * so it will be disposed of when it completes. */
1651 sksgio->skspcl->orphaned = 1;
1652 sksgio->skspcl = NULL;
1653 if (rc == 0) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001654 dev_dbg(&skdev->pdev->dev, "timed out %p (%u ms)\n",
1655 sksgio, sksgio->sg.timeout);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001656 rc = -ETIMEDOUT;
1657 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001658 dev_dbg(&skdev->pdev->dev, "cntlc %p\n", sksgio);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001659 rc = -EINTR;
1660 }
1661 }
1662
1663 spin_unlock_irqrestore(&skdev->lock, flags);
1664
1665 return rc;
1666}
1667
1668static int skd_sg_io_put_status(struct skd_device *skdev,
1669 struct skd_sg_io *sksgio)
1670{
1671 struct sg_io_hdr *sgp = &sksgio->sg;
1672 struct skd_special_context *skspcl = sksgio->skspcl;
1673 int resid = 0;
1674
1675 u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1676
1677 sgp->status = skspcl->req.completion.status;
1678 resid = sksgio->dxfer_len - nb;
1679
1680 sgp->masked_status = sgp->status & STATUS_MASK;
1681 sgp->msg_status = 0;
1682 sgp->host_status = 0;
1683 sgp->driver_status = 0;
1684 sgp->resid = resid;
1685 if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1686 sgp->info |= SG_INFO_CHECK;
1687
Bart Van Asschef98806d2017-08-17 13:12:58 -07001688 dev_dbg(&skdev->pdev->dev, "status %x masked %x resid 0x%x\n",
1689 sgp->status, sgp->masked_status, sgp->resid);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001690
1691 if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1692 if (sgp->mx_sb_len > 0) {
1693 struct fit_comp_error_info *ei = &skspcl->req.err_info;
1694 u32 nbytes = sizeof(*ei);
1695
1696 nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1697
1698 sgp->sb_len_wr = nbytes;
1699
1700 if (__copy_to_user(sgp->sbp, ei, nbytes)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001701 dev_dbg(&skdev->pdev->dev,
1702 "copy_to_user sense failed %p\n",
1703 sgp->sbp);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001704 return -EFAULT;
1705 }
1706 }
1707 }
1708
1709 if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001710 dev_dbg(&skdev->pdev->dev, "copy_to_user sg failed %p\n",
1711 sksgio->argp);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001712 return -EFAULT;
1713 }
1714
1715 return 0;
1716}
1717
1718static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1719 struct skd_sg_io *sksgio)
1720{
1721 struct skd_special_context *skspcl = sksgio->skspcl;
1722
1723 if (skspcl != NULL) {
1724 ulong flags;
1725
1726 sksgio->skspcl = NULL;
1727
1728 spin_lock_irqsave(&skdev->lock, flags);
1729 skd_release_special(skdev, skspcl);
1730 spin_unlock_irqrestore(&skdev->lock, flags);
1731 }
1732
1733 return 0;
1734}
1735
1736/*
1737 *****************************************************************************
1738 * INTERNAL REQUESTS -- generated by driver itself
1739 *****************************************************************************
1740 */
1741
1742static int skd_format_internal_skspcl(struct skd_device *skdev)
1743{
1744 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1745 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1746 struct fit_msg_hdr *fmh;
1747 uint64_t dma_address;
1748 struct skd_scsi_request *scsi;
1749
1750 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1751 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1752 fmh->num_protocol_cmds_coalesced = 1;
1753
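	/*
	 * The 64-byte FIT message header sits at the start of msg_buf, so
	 * the SSDI/SCSI request that follows begins at offset 64 (special
	 * FIT messages are a 64-byte header plus one 64-byte command).
	 */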
1754 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1755 memset(scsi, 0, sizeof(*scsi));
1756 dma_address = skspcl->req.sksg_dma_address;
1757 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1758 sgd->control = FIT_SGD_CONTROL_LAST;
1759 sgd->byte_count = 0;
1760 sgd->host_side_addr = skspcl->db_dma_address;
1761 sgd->dev_side_addr = 0;
1762 sgd->next_desc_ptr = 0LL;
1763
1764 return 1;
1765}
1766
1767#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1768
1769static void skd_send_internal_skspcl(struct skd_device *skdev,
1770 struct skd_special_context *skspcl,
1771 u8 opcode)
1772{
1773 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1774 struct skd_scsi_request *scsi;
1775 unsigned char *buf = skspcl->data_buf;
1776 int i;
1777
1778 if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1779 /*
1780 * A refresh is already in progress.
1781 * Just wait for it to finish.
1782 */
1783 return;
1784
1785 SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1786 skspcl->req.state = SKD_REQ_STATE_BUSY;
1787 skspcl->req.id += SKD_ID_INCR;
1788
1789 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1790 scsi->hdr.tag = skspcl->req.id;
1791
1792 memset(scsi->cdb, 0, sizeof(scsi->cdb));
1793
1794 switch (opcode) {
1795 case TEST_UNIT_READY:
1796 scsi->cdb[0] = TEST_UNIT_READY;
1797 sgd->byte_count = 0;
1798 scsi->hdr.sg_list_len_bytes = 0;
1799 break;
1800
1801 case READ_CAPACITY:
1802 scsi->cdb[0] = READ_CAPACITY;
1803 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1804 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1805 break;
1806
1807 case INQUIRY:
1808 scsi->cdb[0] = INQUIRY;
1809 scsi->cdb[1] = 0x01; /* evpd */
1810 scsi->cdb[2] = 0x80; /* serial number page */
1811 scsi->cdb[4] = 0x10;
1812 sgd->byte_count = 16;
1813 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1814 break;
1815
1816 case SYNCHRONIZE_CACHE:
1817 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1818 sgd->byte_count = 0;
1819 scsi->hdr.sg_list_len_bytes = 0;
1820 break;
1821
1822 case WRITE_BUFFER:
1823 scsi->cdb[0] = WRITE_BUFFER;
1824 scsi->cdb[1] = 0x02;
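		/* cdb[1]=0x02 selects the data mode; cdb[7..8] hold the
		 * transfer length in big-endian order (READ_BUFFER below
		 * uses the same layout) */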
1825 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1826 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1827 sgd->byte_count = WR_BUF_SIZE;
1828 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1829 /* fill incrementing byte pattern */
1830 for (i = 0; i < sgd->byte_count; i++)
1831 buf[i] = i & 0xFF;
1832 break;
1833
1834 case READ_BUFFER:
1835 scsi->cdb[0] = READ_BUFFER;
1836 scsi->cdb[1] = 0x02;
1837 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1838 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1839 sgd->byte_count = WR_BUF_SIZE;
1840 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1841 memset(skspcl->data_buf, 0, sgd->byte_count);
1842 break;
1843
1844 default:
1845 SKD_ASSERT("Don't know what to send");
1846 return;
1847
1848 }
1849 skd_send_special_fitmsg(skdev, skspcl);
1850}
1851
1852static void skd_refresh_device_data(struct skd_device *skdev)
1853{
1854 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1855
1856 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1857}
1858
1859static int skd_chk_read_buf(struct skd_device *skdev,
1860 struct skd_special_context *skspcl)
1861{
1862 unsigned char *buf = skspcl->data_buf;
1863 int i;
1864
1865 /* check for incrementing byte pattern */
1866 for (i = 0; i < WR_BUF_SIZE; i++)
1867 if (buf[i] != (i & 0xFF))
1868 return 1;
1869
1870 return 0;
1871}
1872
1873static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1874 u8 code, u8 qual, u8 fruc)
1875{
1876 /* If the check condition is of special interest, log a message */
1877 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1878 && (code == 0x04) && (qual == 0x06)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001879 dev_err(&skdev->pdev->dev,
1880 "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
1881 key, code, qual, fruc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001882 }
1883}
1884
1885static void skd_complete_internal(struct skd_device *skdev,
1886 volatile struct fit_completion_entry_v1
1887 *skcomp,
1888 volatile struct fit_comp_error_info *skerr,
1889 struct skd_special_context *skspcl)
1890{
1891 u8 *buf = skspcl->data_buf;
1892 u8 status;
1893 int i;
1894 struct skd_scsi_request *scsi =
1895 (struct skd_scsi_request *)&skspcl->msg_buf[64];
1896
1897 SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1898
Bart Van Asschef98806d2017-08-17 13:12:58 -07001899 dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001900
1901 skspcl->req.completion = *skcomp;
1902 skspcl->req.state = SKD_REQ_STATE_IDLE;
1903 skspcl->req.id += SKD_ID_INCR;
1904
1905 status = skspcl->req.completion.status;
1906
1907 skd_log_check_status(skdev, status, skerr->key, skerr->code,
1908 skerr->qual, skerr->fruc);
1909
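	/*
	 * Each successful step of the internal bring-up sequence kicks off
	 * the next one: TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER ->
	 * READ_CAPACITY -> INQUIRY, after which the device is unquiesced.
	 */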
1910 switch (scsi->cdb[0]) {
1911 case TEST_UNIT_READY:
1912 if (status == SAM_STAT_GOOD)
1913 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1914 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1915 (skerr->key == MEDIUM_ERROR))
1916 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1917 else {
1918 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001919 dev_dbg(&skdev->pdev->dev,
1920 "TUR failed, don't send anymore state 0x%x\n",
1921 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001922 return;
1923 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07001924 dev_dbg(&skdev->pdev->dev,
1925 "**** TUR failed, retry skerr\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001926 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1927 }
1928 break;
1929
1930 case WRITE_BUFFER:
1931 if (status == SAM_STAT_GOOD)
1932 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1933 else {
1934 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001935 dev_dbg(&skdev->pdev->dev,
1936 "write buffer failed, don't send anymore state 0x%x\n",
1937 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001938 return;
1939 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07001940 dev_dbg(&skdev->pdev->dev,
1941 "**** write buffer failed, retry skerr\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001942 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1943 }
1944 break;
1945
1946 case READ_BUFFER:
1947 if (status == SAM_STAT_GOOD) {
1948 if (skd_chk_read_buf(skdev, skspcl) == 0)
1949 skd_send_internal_skspcl(skdev, skspcl,
1950 READ_CAPACITY);
1951 else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001952 dev_err(&skdev->pdev->dev,
1953 "*** W/R Buffer mismatch %d ***\n",
1954 skdev->connect_retries);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001955 if (skdev->connect_retries <
1956 SKD_MAX_CONNECT_RETRIES) {
1957 skdev->connect_retries++;
1958 skd_soft_reset(skdev);
1959 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001960 dev_err(&skdev->pdev->dev,
1961 "W/R Buffer Connect Error\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001962 return;
1963 }
1964 }
1965
1966 } else {
1967 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001968 dev_dbg(&skdev->pdev->dev,
1969 "read buffer failed, don't send anymore state 0x%x\n",
1970 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001971 return;
1972 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07001973 dev_dbg(&skdev->pdev->dev,
1974 "**** read buffer failed, retry skerr\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001975 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1976 }
1977 break;
1978
1979 case READ_CAPACITY:
1980 skdev->read_cap_is_valid = 0;
1981 if (status == SAM_STAT_GOOD) {
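			/* READ CAPACITY(10) data: bytes 0..3 hold the last
			 * LBA and bytes 4..7 the block size, both big endian */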
1982 skdev->read_cap_last_lba =
1983 (buf[0] << 24) | (buf[1] << 16) |
1984 (buf[2] << 8) | buf[3];
1985 skdev->read_cap_blocksize =
1986 (buf[4] << 24) | (buf[5] << 16) |
1987 (buf[6] << 8) | buf[7];
1988
Bart Van Asschef98806d2017-08-17 13:12:58 -07001989 dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
1990 skdev->read_cap_last_lba,
1991 skdev->read_cap_blocksize);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001992
1993 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1994
1995 skdev->read_cap_is_valid = 1;
1996
1997 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1998 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
1999 (skerr->key == MEDIUM_ERROR)) {
2000 skdev->read_cap_last_lba = ~0;
2001 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002002 dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002003 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2004 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002005 dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002006 skd_send_internal_skspcl(skdev, skspcl,
2007 TEST_UNIT_READY);
2008 }
2009 break;
2010
2011 case INQUIRY:
2012 skdev->inquiry_is_valid = 0;
2013 if (status == SAM_STAT_GOOD) {
2014 skdev->inquiry_is_valid = 1;
2015
2016 for (i = 0; i < 12; i++)
2017 skdev->inq_serial_num[i] = buf[i + 4];
2018 skdev->inq_serial_num[12] = 0;
2019 }
2020
2021 if (skd_unquiesce_dev(skdev) < 0)
Bart Van Asschef98806d2017-08-17 13:12:58 -07002022			dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002023 /* connection is complete */
2024 skdev->connect_retries = 0;
2025 break;
2026
2027 case SYNCHRONIZE_CACHE:
2028 if (status == SAM_STAT_GOOD)
2029 skdev->sync_done = 1;
2030 else
2031 skdev->sync_done = -1;
2032 wake_up_interruptible(&skdev->waitq);
2033 break;
2034
2035 default:
2036 SKD_ASSERT("we didn't send this");
2037 }
2038}
2039
2040/*
2041 *****************************************************************************
2042 * FIT MESSAGES
2043 *****************************************************************************
2044 */
2045
2046static void skd_send_fitmsg(struct skd_device *skdev,
2047 struct skd_fitmsg_context *skmsg)
2048{
2049 u64 qcmd;
2050 struct fit_msg_hdr *fmh;
2051
Bart Van Asschef98806d2017-08-17 13:12:58 -07002052 dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
2053 skmsg->mb_dma_address, skdev->in_flight);
2054 dev_dbg(&skdev->pdev->dev, "msg_buf 0x%p, offset %x\n", skmsg->msg_buf,
2055 skmsg->offset);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002056
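	/*
	 * The doorbell value packs the DMA address of the message buffer
	 * together with the queue id and message-size flags; the device
	 * fetches the FIT message from that address.
	 */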
2057 qcmd = skmsg->mb_dma_address;
2058 qcmd |= FIT_QCMD_QID_NORMAL;
2059
2060 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2061 skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2062
2063 if (unlikely(skdev->dbg_level > 1)) {
2064 u8 *bp = (u8 *)skmsg->msg_buf;
2065 int i;
2066 for (i = 0; i < skmsg->length; i += 8) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002067 dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
2068 &bp[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002069 if (i == 0)
2070 i = 64 - 8;
2071 }
2072 }
2073
2074 if (skmsg->length > 256)
2075 qcmd |= FIT_QCMD_MSGSIZE_512;
2076 else if (skmsg->length > 128)
2077 qcmd |= FIT_QCMD_MSGSIZE_256;
2078 else if (skmsg->length > 64)
2079 qcmd |= FIT_QCMD_MSGSIZE_128;
2080 else
2081 /*
2082 * This makes no sense because the FIT msg header is
2083 * 64 bytes. If the msg is only 64 bytes long it has
2084 * no payload.
2085 */
2086 qcmd |= FIT_QCMD_MSGSIZE_64;
2087
Bart Van Assche5fbd5452017-08-17 13:12:46 -07002088 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2089 smp_wmb();
2090
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002091 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002092}
2093
2094static void skd_send_special_fitmsg(struct skd_device *skdev,
2095 struct skd_special_context *skspcl)
2096{
2097 u64 qcmd;
2098
2099 if (unlikely(skdev->dbg_level > 1)) {
2100 u8 *bp = (u8 *)skspcl->msg_buf;
2101 int i;
2102
2103 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002104 dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
2105 &bp[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002106 if (i == 0)
2107 i = 64 - 8;
2108 }
2109
Bart Van Asschef98806d2017-08-17 13:12:58 -07002110 dev_dbg(&skdev->pdev->dev,
2111 "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2112 skspcl, skspcl->req.id, skspcl->req.sksg_list,
2113 skspcl->req.sksg_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002114 for (i = 0; i < skspcl->req.n_sg; i++) {
2115 struct fit_sg_descriptor *sgd =
2116 &skspcl->req.sksg_list[i];
2117
Bart Van Asschef98806d2017-08-17 13:12:58 -07002118 dev_dbg(&skdev->pdev->dev,
2119 " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
2120 i, sgd->byte_count, sgd->control,
2121 sgd->host_side_addr, sgd->next_desc_ptr);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002122 }
2123 }
2124
2125 /*
2126 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2127 * and one 64-byte SSDI command.
2128 */
2129 qcmd = skspcl->mb_dma_address;
2130 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2131
Bart Van Assche5fbd5452017-08-17 13:12:46 -07002132 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2133 smp_wmb();
2134
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002135 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2136}
2137
2138/*
2139 *****************************************************************************
2140 * COMPLETION QUEUE
2141 *****************************************************************************
2142 */
2143
2144static void skd_complete_other(struct skd_device *skdev,
2145 volatile struct fit_completion_entry_v1 *skcomp,
2146 volatile struct fit_comp_error_info *skerr);
2147
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002148struct sns_info {
2149 u8 type;
2150 u8 stat;
2151 u8 key;
2152 u8 asc;
2153 u8 ascq;
2154 u8 mask;
2155 enum skd_check_status_action action;
2156};
2157
2158static struct sns_info skd_chkstat_table[] = {
2159 /* Good */
2160 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
2161 SKD_CHECK_STATUS_REPORT_GOOD },
2162
2163 /* Smart alerts */
2164 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
2165 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2166 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
2167 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2168 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
2169 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2170
2171 /* Retry (with limits) */
2172 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
2173 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2174 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
2175 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2176 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
2177 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2178 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
2179 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2180
2181 /* Busy (or about to be) */
2182 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
2183 SKD_CHECK_STATUS_BUSY_IMMINENT },
2184};
2185
2186/*
2187 * Look up status and sense data to decide how to handle the error
2188 * from the device.
2189 * mask says which fields must match e.g., mask=0x18 means check
2190 * type and stat, ignore key, asc, ascq.
2191 */
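/*
 * Concretely, in the table above and the checks below, mask bit 0x10
 * selects type, 0x08 stat, 0x04 key, 0x02 asc and 0x01 ascq, so
 * mask=0x1C means "type, stat and key must match; asc/ascq are ignored".
 */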
2192
Mike Snitzer38d4a1b2013-11-01 15:05:10 -04002193static enum skd_check_status_action
2194skd_check_status(struct skd_device *skdev,
2195 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002196{
2197 int i, n;
2198
Bart Van Asschef98806d2017-08-17 13:12:58 -07002199 dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2200 skerr->key, skerr->code, skerr->qual, skerr->fruc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002201
Bart Van Asschef98806d2017-08-17 13:12:58 -07002202 dev_dbg(&skdev->pdev->dev,
2203 "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2204 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
2205 skerr->fruc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002206
2207 /* Does the info match an entry in the good category? */
2208 n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2209 for (i = 0; i < n; i++) {
2210 struct sns_info *sns = &skd_chkstat_table[i];
2211
2212 if (sns->mask & 0x10)
2213 if (skerr->type != sns->type)
2214 continue;
2215
2216 if (sns->mask & 0x08)
2217 if (cmp_status != sns->stat)
2218 continue;
2219
2220 if (sns->mask & 0x04)
2221 if (skerr->key != sns->key)
2222 continue;
2223
2224 if (sns->mask & 0x02)
2225 if (skerr->code != sns->asc)
2226 continue;
2227
2228 if (sns->mask & 0x01)
2229 if (skerr->qual != sns->ascq)
2230 continue;
2231
2232 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002233 dev_err(&skdev->pdev->dev,
2234 "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
2235 skerr->key, skerr->code, skerr->qual);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002236 }
2237 return sns->action;
2238 }
2239
2240 /* No other match, so nonzero status means error,
2241 * zero status means good
2242 */
2243 if (cmp_status) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002244 dev_dbg(&skdev->pdev->dev, "status check: error\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002245 return SKD_CHECK_STATUS_REPORT_ERROR;
2246 }
2247
Bart Van Asschef98806d2017-08-17 13:12:58 -07002248 dev_dbg(&skdev->pdev->dev, "status check good default\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002249 return SKD_CHECK_STATUS_REPORT_GOOD;
2250}
2251
2252static void skd_resolve_req_exception(struct skd_device *skdev,
2253 struct skd_request_context *skreq)
2254{
2255 u8 cmp_status = skreq->completion.status;
2256
2257 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2258 case SKD_CHECK_STATUS_REPORT_GOOD:
2259 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02002260 skd_end_request(skdev, skreq, BLK_STS_OK);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002261 break;
2262
2263 case SKD_CHECK_STATUS_BUSY_IMMINENT:
2264 skd_log_skreq(skdev, skreq, "retry(busy)");
Mike Snitzer38d4a1b2013-11-01 15:05:10 -04002265 blk_requeue_request(skdev->queue, skreq->req);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002266 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002267 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2268 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2269 skd_quiesce_dev(skdev);
2270 break;
2271
2272 case SKD_CHECK_STATUS_REQUEUE_REQUEST:
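		/* req->special is reused as a per-request retry counter */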
Jens Axboefcd37eb2013-11-01 10:14:56 -06002273 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2274 skd_log_skreq(skdev, skreq, "retry");
Mike Snitzer38d4a1b2013-11-01 15:05:10 -04002275 blk_requeue_request(skdev->queue, skreq->req);
Jens Axboefcd37eb2013-11-01 10:14:56 -06002276 break;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002277 }
Bart Van Asschece6882b2017-08-17 13:12:52 -07002278 /* fall through */
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002279
2280 case SKD_CHECK_STATUS_REPORT_ERROR:
2281 default:
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02002282 skd_end_request(skdev, skreq, BLK_STS_IOERR);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002283 break;
2284 }
2285}
2286
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002287/* assume spinlock is already held */
2288static void skd_release_skreq(struct skd_device *skdev,
2289 struct skd_request_context *skreq)
2290{
2291 u32 msg_slot;
2292 struct skd_fitmsg_context *skmsg;
2293
2294 u32 timo_slot;
2295
2296 /*
2297 * Reclaim the FIT msg buffer if this is
2298 * the first of the requests it carried to
2299 * be completed. The FIT msg buffer used to
2300 * send this request cannot be reused until
2301 * we are sure the s1120 card has copied
2302 * it to its memory. The FIT msg might have
2303 * contained several requests. As soon as
2304 * any of them are completed we know that
2305 * the entire FIT msg was transferred.
2306 * Only the first completed request will
2307 * match the FIT msg buffer id. The FIT
2308 * msg buffer id is immediately updated.
2309 * When subsequent requests complete the FIT
2310 * msg buffer id won't match, so we know
2311 * quite cheaply that it is already done.
2312 */
2313 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2314 SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2315
2316 skmsg = &skdev->skmsg_table[msg_slot];
2317 if (skmsg->id == skreq->fitmsg_id) {
2318 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2319 SKD_ASSERT(skmsg->outstanding > 0);
2320 skmsg->outstanding--;
2321 if (skmsg->outstanding == 0) {
2322 skmsg->state = SKD_MSG_STATE_IDLE;
2323 skmsg->id += SKD_ID_INCR;
2324 skmsg->next = skdev->skmsg_free_list;
2325 skdev->skmsg_free_list = skmsg;
2326 }
2327 }
2328
2329 /*
2330 * Decrease the number of active requests.
 2331	 * Also decrement the count in the timeout slot.
2332 */
2333 SKD_ASSERT(skdev->in_flight > 0);
2334 skdev->in_flight -= 1;
2335
2336 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2337 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2338 skdev->timeout_slot[timo_slot] -= 1;
2339
2340 /*
2341 * Reset backpointer
2342 */
Jens Axboefcd37eb2013-11-01 10:14:56 -06002343 skreq->req = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002344
2345 /*
2346 * Reclaim the skd_request_context
2347 */
2348 skreq->state = SKD_REQ_STATE_IDLE;
2349 skreq->id += SKD_ID_INCR;
2350 skreq->next = skdev->skreq_free_list;
2351 skdev->skreq_free_list = skreq;
2352}
2353
2354#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
2355
2356static void skd_do_inq_page_00(struct skd_device *skdev,
2357 volatile struct fit_completion_entry_v1 *skcomp,
2358 volatile struct fit_comp_error_info *skerr,
2359 uint8_t *cdb, uint8_t *buf)
2360{
2361 uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2362
2363 /* Caller requested "supported pages". The driver needs to insert
2364 * its page.
2365 */
Bart Van Asschef98806d2017-08-17 13:12:58 -07002366 dev_dbg(&skdev->pdev->dev,
2367 "skd_do_driver_inquiry: modify supported pages.\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002368
2369 /* If the device rejected the request because the CDB was
2370 * improperly formed, then just leave.
2371 */
2372 if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2373 skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2374 return;
2375
2376 /* Get the amount of space the caller allocated */
2377 max_bytes = (cdb[3] << 8) | cdb[4];
2378
2379 /* Get the number of pages actually returned by the device */
2380 drive_pages = (buf[2] << 8) | buf[3];
2381 drive_bytes = drive_pages + 4;
2382 new_size = drive_pages + 1;
2383
2384 /* Supported pages must be in numerical order, so find where
2385 * the driver page needs to be inserted into the list of
2386 * pages returned by the device.
2387 */
2388 for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2389 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
 2390			return; /* Device is using this page code; abort. */
2391 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2392 break;
2393 }
2394
2395 if (insert_pt < max_bytes) {
2396 uint16_t u;
2397
2398 /* Shift everything up one byte to make room. */
2399 for (u = new_size + 3; u > insert_pt; u--)
2400 buf[u] = buf[u - 1];
2401 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2402
 2403		/* Increment num_returned_bytes by one, keeping it in SCSI (big-endian) byte order */
2404 skcomp->num_returned_bytes =
2405 be32_to_cpu(skcomp->num_returned_bytes) + 1;
2406 skcomp->num_returned_bytes =
2407 be32_to_cpu(skcomp->num_returned_bytes);
2408 }
2409
2410 /* update page length field to reflect the driver's page too */
2411 buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2412 buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2413}
2414
2415static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2416{
2417 int pcie_reg;
2418 u16 pci_bus_speed;
2419 u8 pci_lanes;
2420
2421 pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2422 if (pcie_reg) {
2423 u16 linksta;
2424 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2425
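		/*
		 * PCI_EXP_LNKSTA: bits 3:0 are the current link speed
		 * (1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s), bits 9:4 the
		 * negotiated link width.
		 */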
2426 pci_bus_speed = linksta & 0xF;
2427 pci_lanes = (linksta & 0x3F0) >> 4;
2428 } else {
2429 *speed = STEC_LINK_UNKNOWN;
2430 *width = 0xFF;
2431 return;
2432 }
2433
2434 switch (pci_bus_speed) {
2435 case 1:
2436 *speed = STEC_LINK_2_5GTS;
2437 break;
2438 case 2:
2439 *speed = STEC_LINK_5GTS;
2440 break;
2441 case 3:
2442 *speed = STEC_LINK_8GTS;
2443 break;
2444 default:
2445 *speed = STEC_LINK_UNKNOWN;
2446 break;
2447 }
2448
2449 if (pci_lanes <= 0x20)
2450 *width = pci_lanes;
2451 else
2452 *width = 0xFF;
2453}
2454
2455static void skd_do_inq_page_da(struct skd_device *skdev,
2456 volatile struct fit_completion_entry_v1 *skcomp,
2457 volatile struct fit_comp_error_info *skerr,
2458 uint8_t *cdb, uint8_t *buf)
2459{
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002460 struct pci_dev *pdev = skdev->pdev;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002461 unsigned max_bytes;
2462 struct driver_inquiry_data inq;
2463 u16 val;
2464
Bart Van Asschef98806d2017-08-17 13:12:58 -07002465 dev_dbg(&skdev->pdev->dev, "skd_do_driver_inquiry: return driver page\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002466
2467 memset(&inq, 0, sizeof(inq));
2468
2469 inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2470
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002471 skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2472 inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2473 inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2474 inq.pcie_function_number = PCI_FUNC(pdev->devfn);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002475
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002476 pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2477 inq.pcie_vendor_id = cpu_to_be16(val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002478
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002479 pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2480 inq.pcie_device_id = cpu_to_be16(val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002481
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002482 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2483 inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002484
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002485 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2486 inq.pcie_subsystem_device_id = cpu_to_be16(val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002487
 2488	/* Driver version, fixed length, padded with spaces on the right */
2489 inq.driver_version_length = sizeof(inq.driver_version);
2490 memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2491 memcpy(inq.driver_version, DRV_VER_COMPL,
2492 min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2493
2494 inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2495
2496 /* Clear the error set by the device */
2497 skcomp->status = SAM_STAT_GOOD;
2498 memset((void *)skerr, 0, sizeof(*skerr));
2499
2500 /* copy response into output buffer */
2501 max_bytes = (cdb[3] << 8) | cdb[4];
2502 memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2503
2504 skcomp->num_returned_bytes =
2505 be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
2506}
2507
2508static void skd_do_driver_inq(struct skd_device *skdev,
2509 volatile struct fit_completion_entry_v1 *skcomp,
2510 volatile struct fit_comp_error_info *skerr,
2511 uint8_t *cdb, uint8_t *buf)
2512{
2513 if (!buf)
2514 return;
2515 else if (cdb[0] != INQUIRY)
2516 return; /* Not an INQUIRY */
2517 else if ((cdb[1] & 1) == 0)
2518 return; /* EVPD not set */
2519 else if (cdb[2] == 0)
2520 /* Need to add driver's page to supported pages list */
2521 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2522 else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2523 /* Caller requested driver's page */
2524 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2525}
2526
2527static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2528{
2529 if (!sg)
2530 return NULL;
2531 if (!sg_page(sg))
2532 return NULL;
2533 return sg_virt(sg);
2534}
2535
2536static void skd_process_scsi_inq(struct skd_device *skdev,
2537 volatile struct fit_completion_entry_v1
2538 *skcomp,
2539 volatile struct fit_comp_error_info *skerr,
2540 struct skd_special_context *skspcl)
2541{
2542 uint8_t *buf;
2543 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2544 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2545
2546 dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2547 skspcl->req.sg_data_dir);
2548 buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2549
2550 if (buf)
2551 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2552}
2553
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002554static int skd_isr_completion_posted(struct skd_device *skdev,
2555 int limit, int *enqueued)
2556{
2557 volatile struct fit_completion_entry_v1 *skcmp = NULL;
2558 volatile struct fit_comp_error_info *skerr;
2559 u16 req_id;
2560 u32 req_slot;
2561 struct skd_request_context *skreq;
2562 u16 cmp_cntxt = 0;
2563 u8 cmp_status = 0;
2564 u8 cmp_cycle = 0;
2565 u32 cmp_bytes = 0;
2566 int rc = 0;
2567 int processed = 0;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002568
2569 for (;; ) {
2570 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2571
2572 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2573 cmp_cycle = skcmp->cycle;
2574 cmp_cntxt = skcmp->tag;
2575 cmp_status = skcmp->status;
2576 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2577
2578 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2579
Bart Van Asschef98806d2017-08-17 13:12:58 -07002580 dev_dbg(&skdev->pdev->dev,
2581 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
2582 skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
2583 cmp_cntxt, cmp_status, skdev->in_flight, cmp_bytes,
2584 skdev->proto_ver);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002585
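		/*
		 * Each completion entry carries the cycle count under which
		 * it was written. A mismatch with the driver's expected
		 * cycle means this slot has not been filled on the current
		 * pass through the ring, i.e. there are no new completions.
		 */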
2586 if (cmp_cycle != skdev->skcomp_cycle) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002587 dev_dbg(&skdev->pdev->dev, "end of completions\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002588 break;
2589 }
2590 /*
2591 * Update the completion queue head index and possibly
2592 * the completion cycle count. 8-bit wrap-around.
2593 */
2594 skdev->skcomp_ix++;
2595 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2596 skdev->skcomp_ix = 0;
2597 skdev->skcomp_cycle++;
2598 }
2599
2600 /*
2601 * The command context is a unique 32-bit ID. The low order
2602 * bits help locate the request. The request is usually a
2603 * r/w request (see skd_start() above) or a special request.
2604 */
2605 req_id = cmp_cntxt;
2606 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2607
2608 /* Is this other than a r/w request? */
2609 if (req_slot >= skdev->num_req_context) {
2610 /*
2611 * This is not a completion for a r/w request.
2612 */
2613 skd_complete_other(skdev, skcmp, skerr);
2614 continue;
2615 }
2616
2617 skreq = &skdev->skreq_table[req_slot];
2618
2619 /*
2620 * Make sure the request ID for the slot matches.
2621 */
2622 if (skreq->id != req_id) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002623 dev_dbg(&skdev->pdev->dev,
2624 "mismatch comp_id=0x%x req_id=0x%x\n", req_id,
2625 skreq->id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002626 {
2627 u16 new_id = cmp_cntxt;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002628 dev_err(&skdev->pdev->dev,
2629 "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2630 req_id, skreq->id, new_id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002631
2632 continue;
2633 }
2634 }
2635
2636 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2637
2638 if (skreq->state == SKD_REQ_STATE_ABORTED) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002639 dev_dbg(&skdev->pdev->dev, "reclaim req %p id=%04x\n",
2640 skreq, skreq->id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002641 /* a previously timed out command can
2642 * now be cleaned up */
2643 skd_release_skreq(skdev, skreq);
2644 continue;
2645 }
2646
2647 skreq->completion = *skcmp;
2648 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2649 skreq->err_info = *skerr;
2650 skd_log_check_status(skdev, cmp_status, skerr->key,
2651 skerr->code, skerr->qual,
2652 skerr->fruc);
2653 }
2654 /* Release DMA resources for the request. */
2655 if (skreq->n_sg > 0)
2656 skd_postop_sg_list(skdev, skreq);
2657
Jens Axboefcd37eb2013-11-01 10:14:56 -06002658 if (!skreq->req) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002659 dev_dbg(&skdev->pdev->dev,
2660 "NULL backptr skdreq %p, req=0x%x req_id=0x%x\n",
2661 skreq, skreq->id, req_id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002662 } else {
2663 /*
2664 * Capture the outcome and post it back to the
2665 * native request.
2666 */
Jens Axboefcd37eb2013-11-01 10:14:56 -06002667 if (likely(cmp_status == SAM_STAT_GOOD))
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02002668 skd_end_request(skdev, skreq, BLK_STS_OK);
Jens Axboefcd37eb2013-11-01 10:14:56 -06002669 else
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002670 skd_resolve_req_exception(skdev, skreq);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002671 }
2672
2673 /*
2674 * Release the skreq, its FIT msg (if one), timeout slot,
2675 * and queue depth.
2676 */
2677 skd_release_skreq(skdev, skreq);
2678
2679 /* skd_isr_comp_limit equal zero means no limit */
2680 if (limit) {
2681 if (++processed >= limit) {
2682 rc = 1;
2683 break;
2684 }
2685 }
2686 }
2687
2688 if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2689 && (skdev->in_flight) == 0) {
2690 skdev->state = SKD_DRVR_STATE_PAUSED;
2691 wake_up_interruptible(&skdev->waitq);
2692 }
2693
2694 return rc;
2695}
2696
2697static void skd_complete_other(struct skd_device *skdev,
2698 volatile struct fit_completion_entry_v1 *skcomp,
2699 volatile struct fit_comp_error_info *skerr)
2700{
2701 u32 req_id = 0;
2702 u32 req_table;
2703 u32 req_slot;
2704 struct skd_special_context *skspcl;
2705
2706 req_id = skcomp->tag;
2707 req_table = req_id & SKD_ID_TABLE_MASK;
2708 req_slot = req_id & SKD_ID_SLOT_MASK;
2709
Bart Van Asschef98806d2017-08-17 13:12:58 -07002710 dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
2711 req_id, req_slot);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002712
2713 /*
2714 * Based on the request id, determine how to dispatch this completion.
 2715	 * This switch/case finds the good cases and forwards the
2716 * completion entry. Errors are reported below the switch.
2717 */
2718 switch (req_table) {
2719 case SKD_ID_RW_REQUEST:
2720 /*
Bart Van Asschee1d06f22017-08-17 13:12:54 -07002721 * The caller, skd_isr_completion_posted() above,
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002722 * handles r/w requests. The only way we get here
2723 * is if the req_slot is out of bounds.
2724 */
2725 break;
2726
2727 case SKD_ID_SPECIAL_REQUEST:
2728 /*
2729 * Make sure the req_slot is in bounds and that the id
2730 * matches.
2731 */
2732 if (req_slot < skdev->n_special) {
2733 skspcl = &skdev->skspcl_table[req_slot];
2734 if (skspcl->req.id == req_id &&
2735 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2736 skd_complete_special(skdev,
2737 skcomp, skerr, skspcl);
2738 return;
2739 }
2740 }
2741 break;
2742
2743 case SKD_ID_INTERNAL:
2744 if (req_slot == 0) {
2745 skspcl = &skdev->internal_skspcl;
2746 if (skspcl->req.id == req_id &&
2747 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2748 skd_complete_internal(skdev,
2749 skcomp, skerr, skspcl);
2750 return;
2751 }
2752 }
2753 break;
2754
2755 case SKD_ID_FIT_MSG:
2756 /*
 2757	 * These ids should never appear in a completion record.
2758 */
2759 break;
2760
2761 default:
2762 /*
 2763	 * These ids should never appear anywhere.
2764 */
2765 break;
2766 }
2767
2768 /*
2769 * If we get here it is a bad or stale id.
2770 */
2771}
2772
2773static void skd_complete_special(struct skd_device *skdev,
2774 volatile struct fit_completion_entry_v1
2775 *skcomp,
2776 volatile struct fit_comp_error_info *skerr,
2777 struct skd_special_context *skspcl)
2778{
Bart Van Asschef98806d2017-08-17 13:12:58 -07002779 dev_dbg(&skdev->pdev->dev, " completing special request %p\n", skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002780 if (skspcl->orphaned) {
2781 /* Discard orphaned request */
2782 /* ?: Can this release directly or does it need
2783 * to use a worker? */
Bart Van Asschef98806d2017-08-17 13:12:58 -07002784 dev_dbg(&skdev->pdev->dev, "release orphaned %p\n", skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002785 skd_release_special(skdev, skspcl);
2786 return;
2787 }
2788
2789 skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2790
2791 skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2792 skspcl->req.completion = *skcomp;
2793 skspcl->req.err_info = *skerr;
2794
2795 skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2796 skerr->code, skerr->qual, skerr->fruc);
2797
2798 wake_up_interruptible(&skdev->waitq);
2799}
2800
2801/* assume spinlock is already held */
2802static void skd_release_special(struct skd_device *skdev,
2803 struct skd_special_context *skspcl)
2804{
2805 int i, was_depleted;
2806
2807 for (i = 0; i < skspcl->req.n_sg; i++) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002808 struct page *page = sg_page(&skspcl->req.sg[i]);
2809 __free_page(page);
2810 }
2811
2812 was_depleted = (skdev->skspcl_free_list == NULL);
2813
2814 skspcl->req.state = SKD_REQ_STATE_IDLE;
2815 skspcl->req.id += SKD_ID_INCR;
2816 skspcl->req.next =
2817 (struct skd_request_context *)skdev->skspcl_free_list;
2818 skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2819
2820 if (was_depleted) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002821 dev_dbg(&skdev->pdev->dev, "skspcl was depleted\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002822		/* Free list was depleted. There might be waiters. */
2823 wake_up_interruptible(&skdev->waitq);
2824 }
2825}
2826
2827static void skd_reset_skcomp(struct skd_device *skdev)
2828{
2829 u32 nbytes;
2830 struct fit_completion_entry_v1 *skcomp;
2831
2832 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2833 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2834
2835 memset(skdev->skcomp_table, 0, nbytes);
2836
2837 skdev->skcomp_ix = 0;
2838 skdev->skcomp_cycle = 1;
2839}
2840
2841/*
2842 *****************************************************************************
2843 * INTERRUPTS
2844 *****************************************************************************
2845 */
2846static void skd_completion_worker(struct work_struct *work)
2847{
2848 struct skd_device *skdev =
2849 container_of(work, struct skd_device, completion_worker);
2850 unsigned long flags;
2851 int flush_enqueued = 0;
2852
2853 spin_lock_irqsave(&skdev->lock, flags);
2854
2855 /*
2856 * pass in limit=0, which means no limit..
2857 * process everything in compq
2858 */
2859 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2860 skd_request_fn(skdev->queue);
2861
2862 spin_unlock_irqrestore(&skdev->lock, flags);
2863}
2864
2865static void skd_isr_msg_from_dev(struct skd_device *skdev);
2866
Arnd Bergmann41c94992016-11-09 13:55:35 +01002867static irqreturn_t
2868skd_isr(int irq, void *ptr)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002869{
2870 struct skd_device *skdev;
2871 u32 intstat;
2872 u32 ack;
2873 int rc = 0;
2874 int deferred = 0;
2875 int flush_enqueued = 0;
2876
2877 skdev = (struct skd_device *)ptr;
2878 spin_lock(&skdev->lock);
2879
2880 for (;; ) {
2881 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2882
2883 ack = FIT_INT_DEF_MASK;
2884 ack &= intstat;
2885
Bart Van Asschef98806d2017-08-17 13:12:58 -07002886 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
2887 ack);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002888
 2889		/* As long as there is an interrupt pending on the device, keep
 2890		 * running the loop. When none is pending, get out; if we never
 2891		 * did any processing, schedule the completion handler anyway.
2892 */
2893 if (ack == 0) {
2894 /* No interrupts on device, but run the completion
2895 * processor anyway?
2896 */
2897 if (rc == 0)
2898 if (likely (skdev->state
2899 == SKD_DRVR_STATE_ONLINE))
2900 deferred = 1;
2901 break;
2902 }
2903
2904 rc = IRQ_HANDLED;
2905
2906 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2907
2908 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
2909 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
2910 if (intstat & FIT_ISH_COMPLETION_POSTED) {
2911 /*
2912 * If we have already deferred completion
2913 * processing, don't bother running it again
2914 */
2915 if (deferred == 0)
2916 deferred =
2917 skd_isr_completion_posted(skdev,
2918 skd_isr_comp_limit, &flush_enqueued);
2919 }
2920
2921 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2922 skd_isr_fwstate(skdev);
2923 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2924 skdev->state ==
2925 SKD_DRVR_STATE_DISAPPEARED) {
2926 spin_unlock(&skdev->lock);
2927 return rc;
2928 }
2929 }
2930
2931 if (intstat & FIT_ISH_MSG_FROM_DEV)
2932 skd_isr_msg_from_dev(skdev);
2933 }
2934 }
2935
2936 if (unlikely(flush_enqueued))
2937 skd_request_fn(skdev->queue);
2938
2939 if (deferred)
2940 schedule_work(&skdev->completion_worker);
2941 else if (!flush_enqueued)
2942 skd_request_fn(skdev->queue);
2943
2944 spin_unlock(&skdev->lock);
2945
2946 return rc;
2947}
2948
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002949static void skd_drive_fault(struct skd_device *skdev)
2950{
2951 skdev->state = SKD_DRVR_STATE_FAULT;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002952 dev_err(&skdev->pdev->dev, "Drive FAULT\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002953}
2954
2955static void skd_drive_disappeared(struct skd_device *skdev)
2956{
2957 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002958 dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002959}
2960
2961static void skd_isr_fwstate(struct skd_device *skdev)
2962{
2963 u32 sense;
2964 u32 state;
2965 u32 mtd;
2966 int prev_driver_state = skdev->state;
2967
2968 sense = SKD_READL(skdev, FIT_STATUS);
2969 state = sense & FIT_SR_DRIVE_STATE_MASK;
2970
Bart Van Asschef98806d2017-08-17 13:12:58 -07002971 dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
2972 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
2973 skd_drive_state_to_str(state), state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002974
2975 skdev->drive_state = state;
2976
2977 switch (skdev->drive_state) {
2978 case FIT_SR_DRIVE_INIT:
2979 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
2980 skd_disable_interrupts(skdev);
2981 break;
2982 }
2983 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
2984 skd_recover_requests(skdev, 0);
2985 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
2986 skdev->timer_countdown = SKD_STARTING_TIMO;
2987 skdev->state = SKD_DRVR_STATE_STARTING;
2988 skd_soft_reset(skdev);
2989 break;
2990 }
2991 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
2992 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2993 skdev->last_mtd = mtd;
2994 break;
2995
2996 case FIT_SR_DRIVE_ONLINE:
2997 skdev->cur_max_queue_depth = skd_max_queue_depth;
2998 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
2999 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3000
3001 skdev->queue_low_water_mark =
3002 skdev->cur_max_queue_depth * 2 / 3 + 1;
3003 if (skdev->queue_low_water_mark < 1)
3004 skdev->queue_low_water_mark = 1;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003005 dev_info(&skdev->pdev->dev,
3006 "Queue depth limit=%d dev=%d lowat=%d\n",
3007 skdev->cur_max_queue_depth,
3008 skdev->dev_max_queue_depth,
3009 skdev->queue_low_water_mark);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003010
3011 skd_refresh_device_data(skdev);
3012 break;
3013
3014 case FIT_SR_DRIVE_BUSY:
3015 skdev->state = SKD_DRVR_STATE_BUSY;
3016 skdev->timer_countdown = SKD_BUSY_TIMO;
3017 skd_quiesce_dev(skdev);
3018 break;
3019 case FIT_SR_DRIVE_BUSY_SANITIZE:
 3020		/* set a 3 second timer; we'll abort any unfinished
 3021		 * commands after it expires
3022 */
3023 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3024 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
Jens Axboe6a5ec652013-11-01 10:38:45 -06003025 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003026 break;
3027 case FIT_SR_DRIVE_BUSY_ERASE:
3028 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3029 skdev->timer_countdown = SKD_BUSY_TIMO;
3030 break;
3031 case FIT_SR_DRIVE_OFFLINE:
3032 skdev->state = SKD_DRVR_STATE_IDLE;
3033 break;
3034 case FIT_SR_DRIVE_SOFT_RESET:
3035 switch (skdev->state) {
3036 case SKD_DRVR_STATE_STARTING:
3037 case SKD_DRVR_STATE_RESTARTING:
3038 /* Expected by a caller of skd_soft_reset() */
3039 break;
3040 default:
3041 skdev->state = SKD_DRVR_STATE_RESTARTING;
3042 break;
3043 }
3044 break;
3045 case FIT_SR_DRIVE_FW_BOOTING:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003046 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003047 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3048 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3049 break;
3050
3051 case FIT_SR_DRIVE_DEGRADED:
3052 case FIT_SR_PCIE_LINK_DOWN:
3053 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3054 break;
3055
3056 case FIT_SR_DRIVE_FAULT:
3057 skd_drive_fault(skdev);
3058 skd_recover_requests(skdev, 0);
Jens Axboe6a5ec652013-11-01 10:38:45 -06003059 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003060 break;
3061
3062 /* PCIe bus returned all Fs? */
3063 case 0xFF:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003064 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
3065 sense);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003066 skd_drive_disappeared(skdev);
3067 skd_recover_requests(skdev, 0);
Jens Axboe6a5ec652013-11-01 10:38:45 -06003068 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003069 break;
3070 default:
3071 /*
 3072		 * Unknown FW state. Wait for a state we recognize.
3073 */
3074 break;
3075 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07003076 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3077 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3078 skd_skdev_state_to_str(skdev->state), skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003079}
3080
3081static void skd_recover_requests(struct skd_device *skdev, int requeue)
3082{
3083 int i;
3084
3085 for (i = 0; i < skdev->num_req_context; i++) {
3086 struct skd_request_context *skreq = &skdev->skreq_table[i];
3087
3088 if (skreq->state == SKD_REQ_STATE_BUSY) {
3089 skd_log_skreq(skdev, skreq, "recover");
3090
3091 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
Jens Axboefcd37eb2013-11-01 10:14:56 -06003092 SKD_ASSERT(skreq->req != NULL);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003093
3094 /* Release DMA resources for the request. */
3095 if (skreq->n_sg > 0)
3096 skd_postop_sg_list(skdev, skreq);
3097
Jens Axboefcd37eb2013-11-01 10:14:56 -06003098 if (requeue &&
3099 (unsigned long) ++skreq->req->special <
3100 SKD_MAX_RETRIES)
Mike Snitzer38d4a1b2013-11-01 15:05:10 -04003101 blk_requeue_request(skdev->queue, skreq->req);
Jens Axboefcd37eb2013-11-01 10:14:56 -06003102 else
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02003103 skd_end_request(skdev, skreq, BLK_STS_IOERR);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003104
Jens Axboefcd37eb2013-11-01 10:14:56 -06003105 skreq->req = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003106
3107 skreq->state = SKD_REQ_STATE_IDLE;
3108 skreq->id += SKD_ID_INCR;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003109 }
3110 if (i > 0)
3111 skreq[-1].next = skreq;
3112 skreq->next = NULL;
3113 }
3114 skdev->skreq_free_list = skdev->skreq_table;
3115
3116 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3117 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3118
3119 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3120 skd_log_skmsg(skdev, skmsg, "salvaged");
3121 SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3122 skmsg->state = SKD_MSG_STATE_IDLE;
3123 skmsg->id += SKD_ID_INCR;
3124 }
3125 if (i > 0)
3126 skmsg[-1].next = skmsg;
3127 skmsg->next = NULL;
3128 }
3129 skdev->skmsg_free_list = skdev->skmsg_table;
3130
3131 for (i = 0; i < skdev->n_special; i++) {
3132 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3133
3134 /* If orphaned, reclaim it because it has already been reported
3135 * to the process as an error (it was just waiting for
3136 * a completion that didn't come, and now it will never come)
 3137	 * a completion that didn't come, and now it will never come).
3138 * out in the wait routine and let it do the normal
3139 * reporting and reclaiming
3140 */
3141 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3142 if (skspcl->orphaned) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003143 dev_dbg(&skdev->pdev->dev, "orphaned %p\n",
3144 skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003145 skd_release_special(skdev, skspcl);
3146 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003147 dev_dbg(&skdev->pdev->dev, "not orphaned %p\n",
3148 skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003149 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3150 }
3151 }
3152 }
3153 skdev->skspcl_free_list = skdev->skspcl_table;
3154
3155 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3156 skdev->timeout_slot[i] = 0;
3157
3158 skdev->in_flight = 0;
3159}
3160
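/*
 * Handle the message-from-device interrupt. During startup this advances
 * the firmware handshake one acknowledged step at a time:
 * FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR ->
 * CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO/HI -> ARM_QUEUE, after which
 * the drive moves toward FIT_SR_DRIVE_ONLINE.
 */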
3161static void skd_isr_msg_from_dev(struct skd_device *skdev)
3162{
3163 u32 mfd;
3164 u32 mtd;
3165 u32 data;
3166
3167 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3168
Bart Van Asschef98806d2017-08-17 13:12:58 -07003169 dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
3170 skdev->last_mtd);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003171
3172 /* ignore any mtd that is an ack for something we didn't send */
3173 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3174 return;
3175
3176 switch (FIT_MXD_TYPE(mfd)) {
3177 case FIT_MTD_FITFW_INIT:
3178 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3179
3180 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003181 dev_err(&skdev->pdev->dev, "protocol mismatch\n");
3182 dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
3183 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
3184 dev_err(&skdev->pdev->dev, " please upgrade driver\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003185 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3186 skd_soft_reset(skdev);
3187 break;
3188 }
3189 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3190 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3191 skdev->last_mtd = mtd;
3192 break;
3193
3194 case FIT_MTD_GET_CMDQ_DEPTH:
3195 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3196 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3197 SKD_N_COMPLETION_ENTRY);
3198 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3199 skdev->last_mtd = mtd;
3200 break;
3201
3202 case FIT_MTD_SET_COMPQ_DEPTH:
3203 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3204 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3205 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3206 skdev->last_mtd = mtd;
3207 break;
3208
3209 case FIT_MTD_SET_COMPQ_ADDR:
3210 skd_reset_skcomp(skdev);
3211 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3212 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3213 skdev->last_mtd = mtd;
3214 break;
3215
3216 case FIT_MTD_CMD_LOG_HOST_ID:
3217 skdev->connect_time_stamp = get_seconds();
3218 data = skdev->connect_time_stamp & 0xFFFF;
3219 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3220 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3221 skdev->last_mtd = mtd;
3222 break;
3223
3224 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3225 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3226 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3227 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3228 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3229 skdev->last_mtd = mtd;
3230 break;
3231
3232 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3233 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3234 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3235 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3236 skdev->last_mtd = mtd;
3237
Bart Van Asschef98806d2017-08-17 13:12:58 -07003238 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
3239 skdev->connect_time_stamp, skdev->drive_jiffies);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003240 break;
3241
3242 case FIT_MTD_ARM_QUEUE:
3243 skdev->last_mtd = 0;
3244 /*
3245 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3246 */
3247 break;
3248
3249 default:
3250 break;
3251 }
3252}
3253
3254static void skd_disable_interrupts(struct skd_device *skdev)
3255{
3256 u32 sense;
3257
3258 sense = SKD_READL(skdev, FIT_CONTROL);
3259 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3260 SKD_WRITEL(skdev, sense, FIT_CONTROL);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003261 dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003262
 3263	/* Note that all 1s are written. A 1-bit means
3264 * disable, a 0 means enable.
3265 */
3266 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3267}
3268
3269static void skd_enable_interrupts(struct skd_device *skdev)
3270{
3271 u32 val;
3272
3273 /* unmask interrupts first */
3274 val = FIT_ISH_FW_STATE_CHANGE +
3275 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3276
3277 /* Note that the complement of the mask is written. A 1-bit means
3278 * disable, a 0-bit means enable. */
3279 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003280 dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003281
3282 val = SKD_READL(skdev, FIT_CONTROL);
3283 val |= FIT_CR_ENABLE_INTERRUPTS;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003284 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003285 SKD_WRITEL(skdev, val, FIT_CONTROL);
3286}
3287
3288/*
3289 *****************************************************************************
3290 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3291 *****************************************************************************
3292 */
3293
3294static void skd_soft_reset(struct skd_device *skdev)
3295{
3296 u32 val;
3297
3298 val = SKD_READL(skdev, FIT_CONTROL);
3299 val |= (FIT_CR_SOFT_RESET);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003300 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003301 SKD_WRITEL(skdev, val, FIT_CONTROL);
3302}
3303
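/*
 * Bring the device up: ack any stale interrupts, read the current drive
 * state, enable interrupts, and then either soft-reset the device or set
 * up the driver state machine to match what the drive reported.
 */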
3304static void skd_start_device(struct skd_device *skdev)
3305{
3306 unsigned long flags;
3307 u32 sense;
3308 u32 state;
3309
3310 spin_lock_irqsave(&skdev->lock, flags);
3311
3312 /* ack all ghost interrupts */
3313 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3314
3315 sense = SKD_READL(skdev, FIT_STATUS);
3316
Bart Van Asschef98806d2017-08-17 13:12:58 -07003317 dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003318
3319 state = sense & FIT_SR_DRIVE_STATE_MASK;
3320 skdev->drive_state = state;
3321 skdev->last_mtd = 0;
3322
3323 skdev->state = SKD_DRVR_STATE_STARTING;
3324 skdev->timer_countdown = SKD_STARTING_TIMO;
3325
3326 skd_enable_interrupts(skdev);
3327
3328 switch (skdev->drive_state) {
3329 case FIT_SR_DRIVE_OFFLINE:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003330 dev_err(&skdev->pdev->dev, "Drive offline...\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003331 break;
3332
3333 case FIT_SR_DRIVE_FW_BOOTING:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003334 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003335 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3336 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3337 break;
3338
3339 case FIT_SR_DRIVE_BUSY_SANITIZE:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003340 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003341 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3342 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3343 break;
3344
3345 case FIT_SR_DRIVE_BUSY_ERASE:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003346 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003347 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3348 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3349 break;
3350
3351 case FIT_SR_DRIVE_INIT:
3352 case FIT_SR_DRIVE_ONLINE:
3353 skd_soft_reset(skdev);
3354 break;
3355
3356 case FIT_SR_DRIVE_BUSY:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003357 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003358 skdev->state = SKD_DRVR_STATE_BUSY;
3359 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3360 break;
3361
3362 case FIT_SR_DRIVE_SOFT_RESET:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003363 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003364 break;
3365
3366 case FIT_SR_DRIVE_FAULT:
3367 /* Fault state is bad... a soft reset won't clear it.
3368 * A hard reset might, but does it work on this device?
3369 * For now, just fault so the system doesn't hang.
3370 */
3371 skd_drive_fault(skdev);
3372 /* start the queue so we can respond with errors to requests */
Bart Van Asschef98806d2017-08-17 13:12:58 -07003373 dev_dbg(&skdev->pdev->dev, "starting queue\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06003374 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003375 skdev->gendisk_on = -1;
3376 wake_up_interruptible(&skdev->waitq);
3377 break;
3378
3379 case 0xFF:
3380 /* Most likely the device isn't there or isn't responding
3381 * to the BAR1 addresses. */
3382 skd_drive_disappeared(skdev);
3383 /* start the queue so we can respond with errors to requests */
Bart Van Asschef98806d2017-08-17 13:12:58 -07003384 dev_dbg(&skdev->pdev->dev,
3385 "starting queue to error-out reqs\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06003386 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003387 skdev->gendisk_on = -1;
3388 wake_up_interruptible(&skdev->waitq);
3389 break;
3390
3391 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003392 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
3393 skdev->drive_state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003394 break;
3395 }
3396
3397 state = SKD_READL(skdev, FIT_CONTROL);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003398 dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003399
3400 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003401 dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003402
3403 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003404 dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003405
3406 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003407 dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003408
3409 state = SKD_READL(skdev, FIT_HW_VERSION);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003410 dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003411
3412 spin_unlock_irqrestore(&skdev->lock, flags);
3413}
3414
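/*
 * Quiesce the device for unload/suspend: flush the write cache with an
 * internal SYNCHRONIZE CACHE request, disable interrupts, soft-reset the
 * device, and poll up to one second for it to return to FIT_SR_DRIVE_INIT.
 */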
3415static void skd_stop_device(struct skd_device *skdev)
3416{
3417 unsigned long flags;
3418 struct skd_special_context *skspcl = &skdev->internal_skspcl;
3419 u32 dev_state;
3420 int i;
3421
3422 spin_lock_irqsave(&skdev->lock, flags);
3423
3424 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003425 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003426 goto stop_out;
3427 }
3428
3429 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003430 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003431 goto stop_out;
3432 }
3433
3434 skdev->state = SKD_DRVR_STATE_SYNCING;
3435 skdev->sync_done = 0;
3436
3437 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3438
3439 spin_unlock_irqrestore(&skdev->lock, flags);
3440
3441 wait_event_interruptible_timeout(skdev->waitq,
3442 (skdev->sync_done), (10 * HZ));
3443
3444 spin_lock_irqsave(&skdev->lock, flags);
3445
3446 switch (skdev->sync_done) {
3447 case 0:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003448 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003449 break;
3450 case 1:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003451 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003452 break;
3453 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003454 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003455 }
3456
3457stop_out:
3458 skdev->state = SKD_DRVR_STATE_STOPPING;
3459 spin_unlock_irqrestore(&skdev->lock, flags);
3460
3461 skd_kill_timer(skdev);
3462
3463 spin_lock_irqsave(&skdev->lock, flags);
3464 skd_disable_interrupts(skdev);
3465
3466 /* ensure all ints on device are cleared */
3467 /* soft reset the device to unload with a clean slate */
3468 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3469 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3470
3471 spin_unlock_irqrestore(&skdev->lock, flags);
3472
3473 /* poll every 100ms, 1 second timeout */
3474 for (i = 0; i < 10; i++) {
3475 dev_state =
3476 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3477 if (dev_state == FIT_SR_DRIVE_INIT)
3478 break;
3479 set_current_state(TASK_INTERRUPTIBLE);
3480 schedule_timeout(msecs_to_jiffies(100));
3481 }
3482
3483 if (dev_state != FIT_SR_DRIVE_INIT)
Bart Van Asschef98806d2017-08-17 13:12:58 -07003484 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
3485 dev_state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003486}
3487
3488/* assume spinlock is held */
3489static void skd_restart_device(struct skd_device *skdev)
3490{
3491 u32 state;
3492
3493 /* ack all ghost interrupts */
3494 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3495
3496 state = SKD_READL(skdev, FIT_STATUS);
3497
Bart Van Asschef98806d2017-08-17 13:12:58 -07003498 dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003499
3500 state &= FIT_SR_DRIVE_STATE_MASK;
3501 skdev->drive_state = state;
3502 skdev->last_mtd = 0;
3503
3504 skdev->state = SKD_DRVR_STATE_RESTARTING;
3505 skdev->timer_countdown = SKD_RESTARTING_TIMO;
3506
3507 skd_soft_reset(skdev);
3508}
3509
3510/* assume spinlock is held */
3511static int skd_quiesce_dev(struct skd_device *skdev)
3512{
3513 int rc = 0;
3514
3515 switch (skdev->state) {
3516 case SKD_DRVR_STATE_BUSY:
3517 case SKD_DRVR_STATE_BUSY_IMMINENT:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003518 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06003519 blk_stop_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003520 break;
3521 case SKD_DRVR_STATE_ONLINE:
3522 case SKD_DRVR_STATE_STOPPING:
3523 case SKD_DRVR_STATE_SYNCING:
3524 case SKD_DRVR_STATE_PAUSING:
3525 case SKD_DRVR_STATE_PAUSED:
3526 case SKD_DRVR_STATE_STARTING:
3527 case SKD_DRVR_STATE_RESTARTING:
3528 case SKD_DRVR_STATE_RESUMING:
3529 default:
3530 rc = -EINVAL;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003531 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
3532 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003533 }
3534 return rc;
3535}
3536
3537/* assume spinlock is held */
3538static int skd_unquiesce_dev(struct skd_device *skdev)
3539{
3540 int prev_driver_state = skdev->state;
3541
3542 skd_log_skdev(skdev, "unquiesce");
3543 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003544 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003545 return 0;
3546 }
3547 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3548 /*
3549 * If the drive state has changed to anything other than
3550 * ONLINE, rely on the next controller state change to
3551 * bring the drive back online and restart the queue.
3552 * The BUSY state means the driver is ready to continue
3553 * normal processing but is waiting for the controller
3554 * to become available.
3555 */
3556 skdev->state = SKD_DRVR_STATE_BUSY;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003557 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003558 return 0;
3559 }
3560
3561 /*
3562 * The drive has just come online. The driver is either in startup,
3563 * paused performing a task, or busy waiting for hardware.
3564 */
3565 switch (skdev->state) {
3566 case SKD_DRVR_STATE_PAUSED:
3567 case SKD_DRVR_STATE_BUSY:
3568 case SKD_DRVR_STATE_BUSY_IMMINENT:
3569 case SKD_DRVR_STATE_BUSY_ERASE:
3570 case SKD_DRVR_STATE_STARTING:
3571 case SKD_DRVR_STATE_RESTARTING:
3572 case SKD_DRVR_STATE_FAULT:
3573 case SKD_DRVR_STATE_IDLE:
3574 case SKD_DRVR_STATE_LOAD:
3575 skdev->state = SKD_DRVR_STATE_ONLINE;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003576 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3577 skd_skdev_state_to_str(prev_driver_state),
3578 prev_driver_state, skd_skdev_state_to_str(skdev->state),
3579 skdev->state);
3580 dev_dbg(&skdev->pdev->dev,
3581 "**** device ONLINE...starting block queue\n");
3582 dev_dbg(&skdev->pdev->dev, "starting queue\n");
3583 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06003584 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003585 skdev->gendisk_on = 1;
3586 wake_up_interruptible(&skdev->waitq);
3587 break;
3588
3589 case SKD_DRVR_STATE_DISAPPEARED:
3590 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003591 dev_dbg(&skdev->pdev->dev,
3592 "**** driver state %d, not implemented\n",
3593 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003594 return -EBUSY;
3595 }
3596 return 0;
3597}
3598
3599/*
3600 *****************************************************************************
3601 * PCIe MSI/MSI-X INTERRUPT HANDLERS
3602 *****************************************************************************
3603 */
3604
3605static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3606{
3607 struct skd_device *skdev = skd_host_data;
3608 unsigned long flags;
3609
3610 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003611 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3612 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3613 dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
3614 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003615 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3616 spin_unlock_irqrestore(&skdev->lock, flags);
3617 return IRQ_HANDLED;
3618}
3619
3620static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3621{
3622 struct skd_device *skdev = skd_host_data;
3623 unsigned long flags;
3624
3625 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003626 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3627 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003628 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3629 skd_isr_fwstate(skdev);
3630 spin_unlock_irqrestore(&skdev->lock, flags);
3631 return IRQ_HANDLED;
3632}
3633
3634static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3635{
3636 struct skd_device *skdev = skd_host_data;
3637 unsigned long flags;
3638 int flush_enqueued = 0;
3639 int deferred;
3640
3641 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003642 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3643 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003644 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3645 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3646 &flush_enqueued);
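	/*
	 * Run the request_fn now if a zero-size flush was enqueued; if the
	 * completion limit was reached, hand the remainder to the completion
	 * worker, otherwise restart the request_fn (unless it already ran
	 * for the flush above).
	 */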
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003647 if (flush_enqueued)
3648 skd_request_fn(skdev->queue);
3649
3650 if (deferred)
3651 schedule_work(&skdev->completion_worker);
3652 else if (!flush_enqueued)
3653 skd_request_fn(skdev->queue);
3654
3655 spin_unlock_irqrestore(&skdev->lock, flags);
3656
3657 return IRQ_HANDLED;
3658}
3659
3660static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3661{
3662 struct skd_device *skdev = skd_host_data;
3663 unsigned long flags;
3664
3665 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003666 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3667 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003668 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3669 skd_isr_msg_from_dev(skdev);
3670 spin_unlock_irqrestore(&skdev->lock, flags);
3671 return IRQ_HANDLED;
3672}
3673
3674static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3675{
3676 struct skd_device *skdev = skd_host_data;
3677 unsigned long flags;
3678
3679 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003680 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3681 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003682 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3683 spin_unlock_irqrestore(&skdev->lock, flags);
3684 return IRQ_HANDLED;
3685}
3686
3687/*
3688 *****************************************************************************
3689 * PCIe MSI/MSI-X SETUP
3690 *****************************************************************************
3691 */
3692
3693struct skd_msix_entry {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003694 char isr_name[30];
3695};
3696
3697struct skd_init_msix_entry {
3698 const char *name;
3699 irq_handler_t handler;
3700};
3701
3702#define SKD_MAX_MSIX_COUNT 13
3703#define SKD_MIN_MSIX_COUNT 7
3704#define SKD_BASE_MSIX_IRQ 4
3705
3706static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3707 { "(DMA 0)", skd_reserved_isr },
3708 { "(DMA 1)", skd_reserved_isr },
3709 { "(DMA 2)", skd_reserved_isr },
3710 { "(DMA 3)", skd_reserved_isr },
3711 { "(State Change)", skd_statec_isr },
3712 { "(COMPL_Q)", skd_comp_q },
3713 { "(MSG)", skd_msg_isr },
3714 { "(Reserved)", skd_reserved_isr },
3715 { "(Reserved)", skd_reserved_isr },
3716 { "(Queue Full 0)", skd_qfull_isr },
3717 { "(Queue Full 1)", skd_qfull_isr },
3718 { "(Queue Full 2)", skd_qfull_isr },
3719 { "(Queue Full 3)", skd_qfull_isr },
3720};
3721
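/*
 * Allocate all SKD_MAX_MSIX_COUNT vectors (all-or-nothing) and register
 * one handler per vector from the msix_entries[] table above.
 */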
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003722static int skd_acquire_msix(struct skd_device *skdev)
3723{
Alexander Gordeeva9df8622014-02-19 09:58:21 +01003724 int i, rc;
Alexander Gordeev46817762014-02-19 09:58:19 +01003725 struct pci_dev *pdev = skdev->pdev;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003726
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003727 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
3728 PCI_IRQ_MSIX);
3729 if (rc < 0) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003730 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
Arnd Bergmann3bc84922016-11-09 13:55:34 +01003731 goto out;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003732 }
Alexander Gordeev46817762014-02-19 09:58:19 +01003733
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003734 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
3735 sizeof(struct skd_msix_entry), GFP_KERNEL);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003736 if (!skdev->msix_entries) {
3737 rc = -ENOMEM;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003738 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
Arnd Bergmann3bc84922016-11-09 13:55:34 +01003739 goto out;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003740 }
3741
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003742 /* Enable MSI-X vectors for the base queue */
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003743 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3744 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
3745
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003746 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3747 "%s%d-msix %s", DRV_NAME, skdev->devno,
3748 msix_entries[i].name);
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003749
3750 rc = devm_request_irq(&skdev->pdev->dev,
3751 pci_irq_vector(skdev->pdev, i),
3752 msix_entries[i].handler, 0,
3753 qentry->isr_name, skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003754 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003755 dev_err(&skdev->pdev->dev,
3756 "Unable to register(%d) MSI-X handler %d: %s\n",
3757 rc, i, qentry->isr_name);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003758 goto msix_out;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003759 }
3760 }
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003761
Bart Van Asschef98806d2017-08-17 13:12:58 -07003762 dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
3763 SKD_MAX_MSIX_COUNT);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003764 return 0;
3765
3766msix_out:
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003767 while (--i >= 0)
3768 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
Arnd Bergmann3bc84922016-11-09 13:55:34 +01003769out:
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003770 kfree(skdev->msix_entries);
3771 skdev->msix_entries = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003772 return rc;
3773}
3774
3775static int skd_acquire_irq(struct skd_device *skdev)
3776{
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003777 struct pci_dev *pdev = skdev->pdev;
3778 unsigned int irq_flag = PCI_IRQ_LEGACY;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003779 int rc;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003780
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003781 if (skd_isr_type == SKD_IRQ_MSIX) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003782 rc = skd_acquire_msix(skdev);
3783 if (!rc)
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003784 return 0;
3785
Bart Van Asschef98806d2017-08-17 13:12:58 -07003786 dev_err(&skdev->pdev->dev,
3787 "failed to enable MSI-X, re-trying with MSI %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003788 }
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003789
3790 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
3791 skdev->devno);
3792
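	/*
	 * MSI-X is not in use; fall back to a single MSI vector, or to
	 * legacy INTx if MSI was explicitly disabled via skd_isr_type.
	 */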
3793 if (skd_isr_type != SKD_IRQ_LEGACY)
3794 irq_flag |= PCI_IRQ_MSI;
3795 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
3796 if (rc < 0) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003797 dev_err(&skdev->pdev->dev,
3798 "failed to allocate the MSI interrupt %d\n", rc);
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003799 return rc;
3800 }
3801
3802 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
3803 pdev->msi_enabled ? 0 : IRQF_SHARED,
3804 skdev->isr_name, skdev);
3805 if (rc) {
3806 pci_free_irq_vectors(pdev);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003807 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
3808 rc);
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003809 return rc;
3810 }
3811
3812 return 0;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003813}
3814
3815static void skd_release_irq(struct skd_device *skdev)
3816{
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003817 struct pci_dev *pdev = skdev->pdev;
3818
3819 if (skdev->msix_entries) {
3820 int i;
3821
3822 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3823 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
3824 skdev);
3825 }
3826
3827 kfree(skdev->msix_entries);
3828 skdev->msix_entries = NULL;
3829 } else {
3830 devm_free_irq(&pdev->dev, pdev->irq, skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003831 }
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003832
3833 pci_free_irq_vectors(pdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003834}
3835
3836/*
3837 *****************************************************************************
3838 * CONSTRUCT
3839 *****************************************************************************
3840 */
3841
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003842static int skd_cons_skcomp(struct skd_device *skdev)
3843{
3844 int rc = 0;
3845 struct fit_completion_entry_v1 *skcomp;
3846 u32 nbytes;
3847
3848 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
3849 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3850
Bart Van Asschef98806d2017-08-17 13:12:58 -07003851 dev_dbg(&skdev->pdev->dev,
3852 "comp pci_alloc, total bytes %d entries %d\n",
3853 nbytes, SKD_N_COMPLETION_ENTRY);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003854
Joe Perchesa5bbf612014-08-08 14:24:12 -07003855 skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
3856 &skdev->cq_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003857
3858 if (skcomp == NULL) {
3859 rc = -ENOMEM;
3860 goto err_out;
3861 }
3862
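	/* The error info table shares the DMA allocation, immediately
	 * following the completion entries.
	 */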
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003863 skdev->skcomp_table = skcomp;
3864 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
3865 sizeof(*skcomp) *
3866 SKD_N_COMPLETION_ENTRY);
3867
3868err_out:
3869 return rc;
3870}
3871
3872static int skd_cons_skmsg(struct skd_device *skdev)
3873{
3874 int rc = 0;
3875 u32 i;
3876
Bart Van Asschef98806d2017-08-17 13:12:58 -07003877 dev_dbg(&skdev->pdev->dev,
3878 "skmsg_table kzalloc, struct %lu, count %u total %lu\n",
3879 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
3880 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003881
3882 skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
3883 *skdev->num_fitmsg_context, GFP_KERNEL);
3884 if (skdev->skmsg_table == NULL) {
3885 rc = -ENOMEM;
3886 goto err_out;
3887 }
3888
3889 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3890 struct skd_fitmsg_context *skmsg;
3891
3892 skmsg = &skdev->skmsg_table[i];
3893
3894 skmsg->id = i + SKD_ID_FIT_MSG;
3895
3896 skmsg->state = SKD_MSG_STATE_IDLE;
3897 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
3898 SKD_N_FITMSG_BYTES + 64,
3899 &skmsg->mb_dma_address);
3900
3901 if (skmsg->msg_buf == NULL) {
3902 rc = -ENOMEM;
3903 goto err_out;
3904 }
3905
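		/* Align the FIT message buffer (CPU and DMA addresses) to the
		 * FIT_QCMD base-address boundary, remembering the displaced
		 * low-order bits in ->offset.
		 */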
3906 skmsg->offset = (u32)((u64)skmsg->msg_buf &
3907 (~FIT_QCMD_BASE_ADDRESS_MASK));
3908 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
3909 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
3910 FIT_QCMD_BASE_ADDRESS_MASK);
3911 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
3912 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
3913 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
3914
3915 skmsg->next = &skmsg[1];
3916 }
3917
3918 /* Free list is in order starting with the 0th entry. */
3919 skdev->skmsg_table[i - 1].next = NULL;
3920 skdev->skmsg_free_list = skdev->skmsg_table;
3921
3922err_out:
3923 return rc;
3924}
3925
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01003926static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
3927 u32 n_sg,
3928 dma_addr_t *ret_dma_addr)
3929{
3930 struct fit_sg_descriptor *sg_list;
3931 u32 nbytes;
3932
3933 nbytes = sizeof(*sg_list) * n_sg;
3934
3935 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
3936
3937 if (sg_list != NULL) {
3938 uint64_t dma_address = *ret_dma_addr;
3939 u32 i;
3940
3941 memset(sg_list, 0, nbytes);
3942
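		/* Chain the descriptors: each entry's next_desc_ptr holds the
		 * DMA address of the following descriptor; the last entry
		 * terminates the chain with a null pointer.
		 */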
3943 for (i = 0; i < n_sg - 1; i++) {
3944 uint64_t ndp_off;
3945 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
3946
3947 sg_list[i].next_desc_ptr = dma_address + ndp_off;
3948 }
3949 sg_list[i].next_desc_ptr = 0LL;
3950 }
3951
3952 return sg_list;
3953}
3954
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003955static int skd_cons_skreq(struct skd_device *skdev)
3956{
3957 int rc = 0;
3958 u32 i;
3959
Bart Van Asschef98806d2017-08-17 13:12:58 -07003960 dev_dbg(&skdev->pdev->dev,
3961 "skreq_table kzalloc, struct %lu, count %u total %lu\n",
3962 sizeof(struct skd_request_context), skdev->num_req_context,
3963 sizeof(struct skd_request_context) * skdev->num_req_context);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003964
3965 skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
3966 * skdev->num_req_context, GFP_KERNEL);
3967 if (skdev->skreq_table == NULL) {
3968 rc = -ENOMEM;
3969 goto err_out;
3970 }
3971
Bart Van Asschef98806d2017-08-17 13:12:58 -07003972 dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
3973 skdev->sgs_per_request, sizeof(struct scatterlist),
3974 skdev->sgs_per_request * sizeof(struct scatterlist));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003975
3976 for (i = 0; i < skdev->num_req_context; i++) {
3977 struct skd_request_context *skreq;
3978
3979 skreq = &skdev->skreq_table[i];
3980
3981 skreq->id = i + SKD_ID_RW_REQUEST;
3982 skreq->state = SKD_REQ_STATE_IDLE;
3983
3984 skreq->sg = kzalloc(sizeof(struct scatterlist) *
3985 skdev->sgs_per_request, GFP_KERNEL);
3986 if (skreq->sg == NULL) {
3987 rc = -ENOMEM;
3988 goto err_out;
3989 }
3990 sg_init_table(skreq->sg, skdev->sgs_per_request);
3991
3992 skreq->sksg_list = skd_cons_sg_list(skdev,
3993 skdev->sgs_per_request,
3994 &skreq->sksg_dma_address);
3995
3996 if (skreq->sksg_list == NULL) {
3997 rc = -ENOMEM;
3998 goto err_out;
3999 }
4000
4001 skreq->next = &skreq[1];
4002 }
4003
4004 /* Free list is in order starting with the 0th entry. */
4005 skdev->skreq_table[i - 1].next = NULL;
4006 skdev->skreq_free_list = skdev->skreq_table;
4007
4008err_out:
4009 return rc;
4010}
4011
4012static int skd_cons_skspcl(struct skd_device *skdev)
4013{
4014 int rc = 0;
4015 u32 i, nbytes;
4016
Bart Van Asschef98806d2017-08-17 13:12:58 -07004017 dev_dbg(&skdev->pdev->dev,
4018 "skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4019 sizeof(struct skd_special_context), skdev->n_special,
4020 sizeof(struct skd_special_context) * skdev->n_special);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004021
4022 skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4023 * skdev->n_special, GFP_KERNEL);
4024 if (skdev->skspcl_table == NULL) {
4025 rc = -ENOMEM;
4026 goto err_out;
4027 }
4028
4029 for (i = 0; i < skdev->n_special; i++) {
4030 struct skd_special_context *skspcl;
4031
4032 skspcl = &skdev->skspcl_table[i];
4033
4034 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4035 skspcl->req.state = SKD_REQ_STATE_IDLE;
4036
4037 skspcl->req.next = &skspcl[1].req;
4038
4039 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4040
Joe Perchesa5bbf612014-08-08 14:24:12 -07004041 skspcl->msg_buf =
4042 pci_zalloc_consistent(skdev->pdev, nbytes,
4043 &skspcl->mb_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004044 if (skspcl->msg_buf == NULL) {
4045 rc = -ENOMEM;
4046 goto err_out;
4047 }
4048
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004049 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4050 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4051 if (skspcl->req.sg == NULL) {
4052 rc = -ENOMEM;
4053 goto err_out;
4054 }
4055
4056 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4057 SKD_N_SG_PER_SPECIAL,
4058 &skspcl->req.
4059 sksg_dma_address);
4060 if (skspcl->req.sksg_list == NULL) {
4061 rc = -ENOMEM;
4062 goto err_out;
4063 }
4064 }
4065
4066 /* Free list is in order starting with the 0th entry. */
4067 skdev->skspcl_table[i - 1].req.next = NULL;
4068 skdev->skspcl_free_list = skdev->skspcl_table;
4069
4070 return rc;
4071
4072err_out:
4073 return rc;
4074}
4075
4076static int skd_cons_sksb(struct skd_device *skdev)
4077{
4078 int rc = 0;
4079 struct skd_special_context *skspcl;
4080 u32 nbytes;
4081
4082 skspcl = &skdev->internal_skspcl;
4083
4084 skspcl->req.id = 0 + SKD_ID_INTERNAL;
4085 skspcl->req.state = SKD_REQ_STATE_IDLE;
4086
4087 nbytes = SKD_N_INTERNAL_BYTES;
4088
Joe Perchesa5bbf612014-08-08 14:24:12 -07004089 skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4090 &skspcl->db_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004091 if (skspcl->data_buf == NULL) {
4092 rc = -ENOMEM;
4093 goto err_out;
4094 }
4095
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004096 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
Joe Perchesa5bbf612014-08-08 14:24:12 -07004097 skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4098 &skspcl->mb_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004099 if (skspcl->msg_buf == NULL) {
4100 rc = -ENOMEM;
4101 goto err_out;
4102 }
4103
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004104 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4105 &skspcl->req.sksg_dma_address);
4106 if (skspcl->req.sksg_list == NULL) {
4107 rc = -ENOMEM;
4108 goto err_out;
4109 }
4110
4111 if (!skd_format_internal_skspcl(skdev)) {
4112 rc = -EINVAL;
4113 goto err_out;
4114 }
4115
4116err_out:
4117 return rc;
4118}
4119
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004120static int skd_cons_disk(struct skd_device *skdev)
4121{
4122 int rc = 0;
4123 struct gendisk *disk;
4124 struct request_queue *q;
4125 unsigned long flags;
4126
4127 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4128 if (!disk) {
4129 rc = -ENOMEM;
4130 goto err_out;
4131 }
4132
4133 skdev->disk = disk;
4134 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4135
4136 disk->major = skdev->major;
4137 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4138 disk->fops = &skd_blockdev_ops;
4139 disk->private_data = skdev;
4140
Jens Axboefcd37eb2013-11-01 10:14:56 -06004141 q = blk_init_queue(skd_request_fn, &skdev->lock);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004142 if (!q) {
4143 rc = -ENOMEM;
4144 goto err_out;
4145 }
Christoph Hellwig8fc45042017-06-19 09:26:26 +02004146 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004147
4148 skdev->queue = q;
4149 disk->queue = q;
4150 q->queuedata = skdev;
4151
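	/* Advertise a volatile write cache plus FUA support to the block layer. */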
Jens Axboe6975f732016-03-30 10:11:42 -06004152 blk_queue_write_cache(q, true, true);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004153 blk_queue_max_segments(q, skdev->sgs_per_request);
4154 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4155
Bart Van Asschea5c5b392017-08-17 13:12:53 -07004156 /* set optimal I/O size to 8KB */
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004157 blk_queue_io_opt(q, 8192);
4158
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004159 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
Mike Snitzerb277da02014-10-04 10:55:32 -06004160 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004161
4162 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07004163 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06004164 blk_stop_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004165 spin_unlock_irqrestore(&skdev->lock, flags);
4166
4167err_out:
4168 return rc;
4169}
4170
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004171#define SKD_N_DEV_TABLE 16u
4172static u32 skd_next_devno;
4173
4174static struct skd_device *skd_construct(struct pci_dev *pdev)
4175{
4176 struct skd_device *skdev;
4177 int blk_major = skd_major;
4178 int rc;
4179
4180 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4181
4182 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004183 dev_err(&pdev->dev, "memory alloc failure\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004184 return NULL;
4185 }
4186
4187 skdev->state = SKD_DRVR_STATE_LOAD;
4188 skdev->pdev = pdev;
4189 skdev->devno = skd_next_devno++;
4190 skdev->major = blk_major;
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004191 skdev->dev_max_queue_depth = 0;
4192
4193 skdev->num_req_context = skd_max_queue_depth;
4194 skdev->num_fitmsg_context = skd_max_queue_depth;
4195 skdev->n_special = skd_max_pass_thru;
4196 skdev->cur_max_queue_depth = 1;
4197 skdev->queue_low_water_mark = 1;
4198 skdev->proto_ver = 99;
4199 skdev->sgs_per_request = skd_sgs_per_request;
4200 skdev->dbg_level = skd_dbg_level;
4201
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004202 spin_lock_init(&skdev->lock);
4203
4204 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4205
Bart Van Asschef98806d2017-08-17 13:12:58 -07004206 dev_dbg(&skdev->pdev->dev, "skcomp\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004207 rc = skd_cons_skcomp(skdev);
4208 if (rc < 0)
4209 goto err_out;
4210
Bart Van Asschef98806d2017-08-17 13:12:58 -07004211 dev_dbg(&skdev->pdev->dev, "skmsg\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004212 rc = skd_cons_skmsg(skdev);
4213 if (rc < 0)
4214 goto err_out;
4215
Bart Van Asschef98806d2017-08-17 13:12:58 -07004216 dev_dbg(&skdev->pdev->dev, "skreq\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004217 rc = skd_cons_skreq(skdev);
4218 if (rc < 0)
4219 goto err_out;
4220
Bart Van Asschef98806d2017-08-17 13:12:58 -07004221 dev_dbg(&skdev->pdev->dev, "skspcl\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004222 rc = skd_cons_skspcl(skdev);
4223 if (rc < 0)
4224 goto err_out;
4225
Bart Van Asschef98806d2017-08-17 13:12:58 -07004226 dev_dbg(&skdev->pdev->dev, "sksb\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004227 rc = skd_cons_sksb(skdev);
4228 if (rc < 0)
4229 goto err_out;
4230
Bart Van Asschef98806d2017-08-17 13:12:58 -07004231 dev_dbg(&skdev->pdev->dev, "disk\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004232 rc = skd_cons_disk(skdev);
4233 if (rc < 0)
4234 goto err_out;
4235
Bart Van Asschef98806d2017-08-17 13:12:58 -07004236 dev_dbg(&skdev->pdev->dev, "VICTORY\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004237 return skdev;
4238
4239err_out:
Bart Van Asschef98806d2017-08-17 13:12:58 -07004240 dev_dbg(&skdev->pdev->dev, "construct failed\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004241 skd_destruct(skdev);
4242 return NULL;
4243}
4244
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004245/*
4246 *****************************************************************************
4247 * DESTRUCT (FREE)
4248 *****************************************************************************
4249 */
4250
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004251static void skd_free_skcomp(struct skd_device *skdev)
4252{
4253 if (skdev->skcomp_table != NULL) {
4254 u32 nbytes;
4255
4256 nbytes = sizeof(skdev->skcomp_table[0]) *
4257 SKD_N_COMPLETION_ENTRY;
4258 pci_free_consistent(skdev->pdev, nbytes,
4259 skdev->skcomp_table, skdev->cq_dma_address);
4260 }
4261
4262 skdev->skcomp_table = NULL;
4263 skdev->cq_dma_address = 0;
4264}
4265
4266static void skd_free_skmsg(struct skd_device *skdev)
4267{
4268 u32 i;
4269
4270 if (skdev->skmsg_table == NULL)
4271 return;
4272
4273 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4274 struct skd_fitmsg_context *skmsg;
4275
4276 skmsg = &skdev->skmsg_table[i];
4277
4278 if (skmsg->msg_buf != NULL) {
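			/* Reapply the alignment offset saved in
			 * skd_cons_skmsg() before freeing the buffer.
			 */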
4279 skmsg->msg_buf += skmsg->offset;
4280 skmsg->mb_dma_address += skmsg->offset;
4281 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4282 skmsg->msg_buf,
4283 skmsg->mb_dma_address);
4284 }
4285 skmsg->msg_buf = NULL;
4286 skmsg->mb_dma_address = 0;
4287 }
4288
4289 kfree(skdev->skmsg_table);
4290 skdev->skmsg_table = NULL;
4291}
4292
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004293static void skd_free_sg_list(struct skd_device *skdev,
4294 struct fit_sg_descriptor *sg_list,
4295 u32 n_sg, dma_addr_t dma_addr)
4296{
4297 if (sg_list != NULL) {
4298 u32 nbytes;
4299
4300 nbytes = sizeof(*sg_list) * n_sg;
4301
4302 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4303 }
4304}
4305
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004306static void skd_free_skreq(struct skd_device *skdev)
4307{
4308 u32 i;
4309
4310 if (skdev->skreq_table == NULL)
4311 return;
4312
4313 for (i = 0; i < skdev->num_req_context; i++) {
4314 struct skd_request_context *skreq;
4315
4316 skreq = &skdev->skreq_table[i];
4317
4318 skd_free_sg_list(skdev, skreq->sksg_list,
4319 skdev->sgs_per_request,
4320 skreq->sksg_dma_address);
4321
4322 skreq->sksg_list = NULL;
4323 skreq->sksg_dma_address = 0;
4324
4325 kfree(skreq->sg);
4326 }
4327
4328 kfree(skdev->skreq_table);
4329 skdev->skreq_table = NULL;
4330}
4331
4332static void skd_free_skspcl(struct skd_device *skdev)
4333{
4334 u32 i;
4335 u32 nbytes;
4336
4337 if (skdev->skspcl_table == NULL)
4338 return;
4339
4340 for (i = 0; i < skdev->n_special; i++) {
4341 struct skd_special_context *skspcl;
4342
4343 skspcl = &skdev->skspcl_table[i];
4344
4345 if (skspcl->msg_buf != NULL) {
4346 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4347 pci_free_consistent(skdev->pdev, nbytes,
4348 skspcl->msg_buf,
4349 skspcl->mb_dma_address);
4350 }
4351
4352 skspcl->msg_buf = NULL;
4353 skspcl->mb_dma_address = 0;
4354
4355 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4356 SKD_N_SG_PER_SPECIAL,
4357 skspcl->req.sksg_dma_address);
4358
4359 skspcl->req.sksg_list = NULL;
4360 skspcl->req.sksg_dma_address = 0;
4361
4362 kfree(skspcl->req.sg);
4363 }
4364
4365 kfree(skdev->skspcl_table);
4366 skdev->skspcl_table = NULL;
4367}
4368
4369static void skd_free_sksb(struct skd_device *skdev)
4370{
4371 struct skd_special_context *skspcl;
4372 u32 nbytes;
4373
4374 skspcl = &skdev->internal_skspcl;
4375
4376 if (skspcl->data_buf != NULL) {
4377 nbytes = SKD_N_INTERNAL_BYTES;
4378
4379 pci_free_consistent(skdev->pdev, nbytes,
4380 skspcl->data_buf, skspcl->db_dma_address);
4381 }
4382
4383 skspcl->data_buf = NULL;
4384 skspcl->db_dma_address = 0;
4385
4386 if (skspcl->msg_buf != NULL) {
4387 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4388 pci_free_consistent(skdev->pdev, nbytes,
4389 skspcl->msg_buf, skspcl->mb_dma_address);
4390 }
4391
4392 skspcl->msg_buf = NULL;
4393 skspcl->mb_dma_address = 0;
4394
4395 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4396 skspcl->req.sksg_dma_address);
4397
4398 skspcl->req.sksg_list = NULL;
4399 skspcl->req.sksg_dma_address = 0;
4400}
4401
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004402static void skd_free_disk(struct skd_device *skdev)
4403{
4404 struct gendisk *disk = skdev->disk;
4405
Bart Van Assche7277cc62017-08-17 13:12:45 -07004406 if (disk && (disk->flags & GENHD_FL_UP))
4407 del_gendisk(disk);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004408
Bart Van Assche7277cc62017-08-17 13:12:45 -07004409 if (skdev->queue) {
4410 blk_cleanup_queue(skdev->queue);
4411 skdev->queue = NULL;
4412 disk->queue = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004413 }
Bart Van Assche7277cc62017-08-17 13:12:45 -07004414
4415 put_disk(disk);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004416 skdev->disk = NULL;
4417}
4418
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004419static void skd_destruct(struct skd_device *skdev)
4420{
4421 if (skdev == NULL)
4422 return;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004423
Bart Van Asschef98806d2017-08-17 13:12:58 -07004424 dev_dbg(&skdev->pdev->dev, "disk\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004425 skd_free_disk(skdev);
4426
Bart Van Asschef98806d2017-08-17 13:12:58 -07004427 dev_dbg(&skdev->pdev->dev, "sksb\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004428 skd_free_sksb(skdev);
4429
Bart Van Asschef98806d2017-08-17 13:12:58 -07004430 dev_dbg(&skdev->pdev->dev, "skspcl\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004431 skd_free_skspcl(skdev);
4432
Bart Van Asschef98806d2017-08-17 13:12:58 -07004433 dev_dbg(&skdev->pdev->dev, "skreq\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004434 skd_free_skreq(skdev);
4435
Bart Van Asschef98806d2017-08-17 13:12:58 -07004436 dev_dbg(&skdev->pdev->dev, "skmsg\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004437 skd_free_skmsg(skdev);
4438
Bart Van Asschef98806d2017-08-17 13:12:58 -07004439 dev_dbg(&skdev->pdev->dev, "skcomp\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004440 skd_free_skcomp(skdev);
4441
Bart Van Asschef98806d2017-08-17 13:12:58 -07004442 dev_dbg(&skdev->pdev->dev, "skdev\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004443 kfree(skdev);
4444}
4445
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004446/*
4447 *****************************************************************************
4448 * BLOCK DEVICE (BDEV) GLUE
4449 *****************************************************************************
4450 */
4451
4452static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4453{
4454 struct skd_device *skdev;
4455 u64 capacity;
4456
4457 skdev = bdev->bd_disk->private_data;
4458
Bart Van Asschef98806d2017-08-17 13:12:58 -07004459 dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
4460 bdev->bd_disk->disk_name, current->comm);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004461
4462 if (skdev->read_cap_is_valid) {
4463 capacity = get_capacity(skdev->disk);
4464 geo->heads = 64;
4465 geo->sectors = 255;
4466 geo->cylinders = (capacity) / (255 * 64);
4467
4468 return 0;
4469 }
4470 return -EIO;
4471}
4472
Dan Williams0d52c7562016-06-15 19:44:20 -07004473static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004474{
Bart Van Asschef98806d2017-08-17 13:12:58 -07004475 dev_dbg(&skdev->pdev->dev, "add_disk\n");
Dan Williams0d52c7562016-06-15 19:44:20 -07004476 device_add_disk(parent, skdev->disk);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004477 return 0;
4478}
4479
4480static const struct block_device_operations skd_blockdev_ops = {
4481 .owner = THIS_MODULE,
4482 .ioctl = skd_bdev_ioctl,
4483 .getgeo = skd_bdev_getgeo,
4484};
4485
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004486/*
4487 *****************************************************************************
4488 * PCIe DRIVER GLUE
4489 *****************************************************************************
4490 */
4491
Benoit Taine9baa3c32014-08-08 15:56:03 +02004492static const struct pci_device_id skd_pci_tbl[] = {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004493 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4494 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4495 { 0 } /* terminate list */
4496};
4497
4498MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4499
4500static char *skd_pci_info(struct skd_device *skdev, char *str)
4501{
4502 int pcie_reg;
4503
4504 strcpy(str, "PCIe (");
4505 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4506
4507 if (pcie_reg) {
4508
4509 char lwstr[6];
4510 uint16_t pcie_lstat, lspeed, lwidth;
4511
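		/* The Link Status register (PCI_EXP_LNKSTA) is at offset 0x12
		 * within the PCIe capability; decode link speed and width.
		 */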
4512 pcie_reg += 0x12;
4513 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4514 lspeed = pcie_lstat & (0xF);
4515 lwidth = (pcie_lstat & 0x3F0) >> 4;
4516
4517 if (lspeed == 1)
4518 strcat(str, "2.5GT/s ");
4519 else if (lspeed == 2)
4520 strcat(str, "5.0GT/s ");
4521 else
4522 strcat(str, "<unknown> ");
4523 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4524 strcat(str, lwstr);
4525 }
4526 return str;
4527}
4528
4529static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4530{
4531 int i;
4532 int rc = 0;
4533 char pci_str[32];
4534 struct skd_device *skdev;
4535
Bart Van Asschef98806d2017-08-17 13:12:58 -07004536 dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n",
4537 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4538 dev_info(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
4539 pdev->device);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004540
4541 rc = pci_enable_device(pdev);
4542 if (rc)
4543 return rc;
4544 rc = pci_request_regions(pdev, DRV_NAME);
4545 if (rc)
4546 goto err_out;
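	/* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails. */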
4547 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4548 if (!rc) {
4549 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004550 dev_err(&pdev->dev, "consistent DMA mask error %d\n",
4551 rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004552 }
4553 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004554 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004555 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004556 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004557 goto err_out_regions;
4558 }
4559 }
4560
Bartlomiej Zolnierkiewiczb8df6642013-11-05 12:37:02 +01004561 if (!skd_major) {
4562 rc = register_blkdev(0, DRV_NAME);
4563 if (rc < 0)
4564 goto err_out_regions;
4565 BUG_ON(!rc);
4566 skd_major = rc;
4567 }
4568
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004569 skdev = skd_construct(pdev);
Wei Yongjun1762b572013-10-30 13:23:53 +08004570 if (skdev == NULL) {
4571 rc = -ENOMEM;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004572 goto err_out_regions;
Wei Yongjun1762b572013-10-30 13:23:53 +08004573 }
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004574
4575 skd_pci_info(skdev, pci_str);
Bart Van Asschef98806d2017-08-17 13:12:58 -07004576 dev_info(&pdev->dev, "%s 64bit\n", pci_str);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004577
4578 pci_set_master(pdev);
4579 rc = pci_enable_pcie_error_reporting(pdev);
4580 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004581 dev_err(&pdev->dev,
4582 "bad enable of PCIe error reporting rc=%d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004583 skdev->pcie_error_reporting_is_enabled = 0;
4584 } else
4585 skdev->pcie_error_reporting_is_enabled = 1;
4586
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004587 pci_set_drvdata(pdev, skdev);
Bartlomiej Zolnierkiewiczebedd162013-11-05 12:37:05 +01004588
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004589 for (i = 0; i < SKD_MAX_BARS; i++) {
4590 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4591 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4592 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4593 skdev->mem_size[i]);
4594 if (!skdev->mem_map[i]) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004595 dev_err(&pdev->dev,
4596 "Unable to map adapter memory!\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004597 rc = -ENODEV;
4598 goto err_out_iounmap;
4599 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07004600 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
4601 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
4602 skdev->mem_size[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004603 }
4604
4605 rc = skd_acquire_irq(skdev);
4606 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004607 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004608 goto err_out_iounmap;
4609 }
4610
4611 rc = skd_start_timer(skdev);
4612 if (rc)
4613 goto err_out_timer;
4614
4615 init_waitqueue_head(&skdev->waitq);
4616
4617 skd_start_device(skdev);
4618
4619 rc = wait_event_interruptible_timeout(skdev->waitq,
4620 (skdev->gendisk_on),
4621 (SKD_START_WAIT_SECONDS * HZ));
4622 if (skdev->gendisk_on > 0) {
4623 /* device came on-line after reset */
Dan Williams0d52c7562016-06-15 19:44:20 -07004624 skd_bdev_attach(&pdev->dev, skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004625 rc = 0;
4626 } else {
4627 /* we timed out, something is wrong with the device,
4628 * don't add the disk structure */
Bart Van Asschef98806d2017-08-17 13:12:58 -07004629 dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
4630 rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004631 /* if the wait returned no error, report the timeout as -ENXIO */
4632 if (!rc)
4633 rc = -ENXIO;
4634 goto err_out_timer;
4635 }
4636
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004637 return rc;
4638
4639err_out_timer:
4640 skd_stop_device(skdev);
4641 skd_release_irq(skdev);
4642
4643err_out_iounmap:
4644 for (i = 0; i < SKD_MAX_BARS; i++)
4645 if (skdev->mem_map[i])
4646 iounmap(skdev->mem_map[i]);
4647
4648 if (skdev->pcie_error_reporting_is_enabled)
4649 pci_disable_pcie_error_reporting(pdev);
4650
4651 skd_destruct(skdev);
4652
4653err_out_regions:
4654 pci_release_regions(pdev);
4655
4656err_out:
4657 pci_disable_device(pdev);
4658 pci_set_drvdata(pdev, NULL);
4659 return rc;
4660}
4661
4662static void skd_pci_remove(struct pci_dev *pdev)
4663{
4664 int i;
4665 struct skd_device *skdev;
4666
4667 skdev = pci_get_drvdata(pdev);
4668 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004669 dev_err(&pdev->dev, "no device data for PCI\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004670 return;
4671 }
4672 skd_stop_device(skdev);
4673 skd_release_irq(skdev);
4674
4675 for (i = 0; i < SKD_MAX_BARS; i++)
4676 if (skdev->mem_map[i])
4677 iounmap((u32 *)skdev->mem_map[i]);
4678
4679 if (skdev->pcie_error_reporting_is_enabled)
4680 pci_disable_pcie_error_reporting(pdev);
4681
4682 skd_destruct(skdev);
4683
4684 pci_release_regions(pdev);
4685 pci_disable_device(pdev);
4686 pci_set_drvdata(pdev, NULL);
4687
4688 return;
4689}
4690
4691static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4692{
4693 int i;
4694 struct skd_device *skdev;
4695
4696 skdev = pci_get_drvdata(pdev);
4697 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004698 dev_err(&pdev->dev, "no device data for PCI\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004699 return -EIO;
4700 }
4701
4702 skd_stop_device(skdev);
4703
4704 skd_release_irq(skdev);
4705
4706 for (i = 0; i < SKD_MAX_BARS; i++)
4707 if (skdev->mem_map[i])
4708 iounmap((u32 *)skdev->mem_map[i]);
4709
4710 if (skdev->pcie_error_reporting_is_enabled)
4711 pci_disable_pcie_error_reporting(pdev);
4712
4713 pci_release_regions(pdev);
4714 pci_save_state(pdev);
4715 pci_disable_device(pdev);
4716 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4717 return 0;
4718}
4719
4720static int skd_pci_resume(struct pci_dev *pdev)
4721{
4722 int i;
4723 int rc = 0;
4724 struct skd_device *skdev;
4725
4726 skdev = pci_get_drvdata(pdev);
4727 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004728 dev_err(&pdev->dev, "no device data for PCI\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004729 return -1;
4730 }
4731
4732 pci_set_power_state(pdev, PCI_D0);
4733 pci_enable_wake(pdev, PCI_D0, 0);
4734 pci_restore_state(pdev);
4735
4736 rc = pci_enable_device(pdev);
4737 if (rc)
4738 return rc;
4739 rc = pci_request_regions(pdev, DRV_NAME);
4740 if (rc)
4741 goto err_out;
4742 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4743 if (!rc) {
4744 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4745
Bart Van Asschef98806d2017-08-17 13:12:58 -07004746 dev_err(&pdev->dev, "consistent DMA mask error %d\n",
4747 rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004748 }
4749 } else {
4750 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4751 if (rc) {
4752
Bart Van Asschef98806d2017-08-17 13:12:58 -07004753 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004754 goto err_out_regions;
4755 }
4756 }
4757
4758 pci_set_master(pdev);
4759 rc = pci_enable_pcie_error_reporting(pdev);
4760 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004761 dev_err(&pdev->dev,
4762 "bad enable of PCIe error reporting rc=%d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004763 skdev->pcie_error_reporting_is_enabled = 0;
4764 } else
4765 skdev->pcie_error_reporting_is_enabled = 1;
4766
4767 for (i = 0; i < SKD_MAX_BARS; i++) {
4768
4769 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4770 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4771 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4772 skdev->mem_size[i]);
4773 if (!skdev->mem_map[i]) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004774 dev_err(&pdev->dev, "Unable to map adapter memory!\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004775 rc = -ENODEV;
4776 goto err_out_iounmap;
4777 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07004778 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
4779 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
4780 skdev->mem_size[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004781 }
4782 rc = skd_acquire_irq(skdev);
4783 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004784 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004785 goto err_out_iounmap;
4786 }
4787
4788 rc = skd_start_timer(skdev);
4789 if (rc)
4790 goto err_out_timer;
4791
4792 init_waitqueue_head(&skdev->waitq);
4793
4794 skd_start_device(skdev);
4795
4796 return rc;
4797
4798err_out_timer:
4799 skd_stop_device(skdev);
4800 skd_release_irq(skdev);
4801
4802err_out_iounmap:
4803 for (i = 0; i < SKD_MAX_BARS; i++)
4804 if (skdev->mem_map[i])
4805 iounmap(skdev->mem_map[i]);
4806
4807 if (skdev->pcie_error_reporting_is_enabled)
4808 pci_disable_pcie_error_reporting(pdev);
4809
4810err_out_regions:
4811 pci_release_regions(pdev);
4812
4813err_out:
4814 pci_disable_device(pdev);
4815 return rc;
4816}
4817
4818static void skd_pci_shutdown(struct pci_dev *pdev)
4819{
4820 struct skd_device *skdev;
4821
Bart Van Asschef98806d2017-08-17 13:12:58 -07004822 dev_err(&pdev->dev, "%s called\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004823
4824 skdev = pci_get_drvdata(pdev);
4825 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004826 dev_err(&pdev->dev, "no device data for PCI\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004827 return;
4828 }
4829
Bart Van Asschef98806d2017-08-17 13:12:58 -07004830 dev_err(&pdev->dev, "calling stop\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004831 skd_stop_device(skdev);
4832}
4833
4834static struct pci_driver skd_driver = {
4835 .name = DRV_NAME,
4836 .id_table = skd_pci_tbl,
4837 .probe = skd_pci_probe,
4838 .remove = skd_pci_remove,
4839 .suspend = skd_pci_suspend,
4840 .resume = skd_pci_resume,
4841 .shutdown = skd_pci_shutdown,
4842};
4843
4844/*
4845 *****************************************************************************
4846 * LOGGING SUPPORT
4847 *****************************************************************************
4848 */
4849
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004850const char *skd_drive_state_to_str(int state)
4851{
4852 switch (state) {
4853 case FIT_SR_DRIVE_OFFLINE:
4854 return "OFFLINE";
4855 case FIT_SR_DRIVE_INIT:
4856 return "INIT";
4857 case FIT_SR_DRIVE_ONLINE:
4858 return "ONLINE";
4859 case FIT_SR_DRIVE_BUSY:
4860 return "BUSY";
4861 case FIT_SR_DRIVE_FAULT:
4862 return "FAULT";
4863 case FIT_SR_DRIVE_DEGRADED:
4864 return "DEGRADED";
4865 case FIT_SR_PCIE_LINK_DOWN:
4866		return "LINK_DOWN";
4867 case FIT_SR_DRIVE_SOFT_RESET:
4868 return "SOFT_RESET";
4869 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
4870 return "NEED_FW";
4871 case FIT_SR_DRIVE_INIT_FAULT:
4872 return "INIT_FAULT";
4873 case FIT_SR_DRIVE_BUSY_SANITIZE:
4874 return "BUSY_SANITIZE";
4875 case FIT_SR_DRIVE_BUSY_ERASE:
4876 return "BUSY_ERASE";
4877 case FIT_SR_DRIVE_FW_BOOTING:
4878 return "FW_BOOTING";
4879 default:
4880 return "???";
4881 }
4882}
4883
4884const char *skd_skdev_state_to_str(enum skd_drvr_state state)
4885{
4886 switch (state) {
4887 case SKD_DRVR_STATE_LOAD:
4888 return "LOAD";
4889 case SKD_DRVR_STATE_IDLE:
4890 return "IDLE";
4891 case SKD_DRVR_STATE_BUSY:
4892 return "BUSY";
4893 case SKD_DRVR_STATE_STARTING:
4894 return "STARTING";
4895 case SKD_DRVR_STATE_ONLINE:
4896 return "ONLINE";
4897 case SKD_DRVR_STATE_PAUSING:
4898 return "PAUSING";
4899 case SKD_DRVR_STATE_PAUSED:
4900 return "PAUSED";
4901 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
4902 return "DRAINING_TIMEOUT";
4903 case SKD_DRVR_STATE_RESTARTING:
4904 return "RESTARTING";
4905 case SKD_DRVR_STATE_RESUMING:
4906 return "RESUMING";
4907 case SKD_DRVR_STATE_STOPPING:
4908 return "STOPPING";
4909 case SKD_DRVR_STATE_SYNCING:
4910 return "SYNCING";
4911 case SKD_DRVR_STATE_FAULT:
4912 return "FAULT";
4913 case SKD_DRVR_STATE_DISAPPEARED:
4914 return "DISAPPEARED";
4915 case SKD_DRVR_STATE_BUSY_ERASE:
4916 return "BUSY_ERASE";
4917 case SKD_DRVR_STATE_BUSY_SANITIZE:
4918 return "BUSY_SANITIZE";
4919 case SKD_DRVR_STATE_BUSY_IMMINENT:
4920 return "BUSY_IMMINENT";
4921 case SKD_DRVR_STATE_WAIT_BOOT:
4922 return "WAIT_BOOT";
4923
4924 default:
4925 return "???";
4926 }
4927}
4928
Rashika Kheriaa26ba7f2013-12-19 15:02:22 +05304929static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004930{
4931 switch (state) {
4932 case SKD_MSG_STATE_IDLE:
4933 return "IDLE";
4934 case SKD_MSG_STATE_BUSY:
4935 return "BUSY";
4936 default:
4937 return "???";
4938 }
4939}
4940
Rashika Kheriaa26ba7f2013-12-19 15:02:22 +05304941static const char *skd_skreq_state_to_str(enum skd_req_state state)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004942{
4943 switch (state) {
4944 case SKD_REQ_STATE_IDLE:
4945 return "IDLE";
4946 case SKD_REQ_STATE_SETUP:
4947 return "SETUP";
4948 case SKD_REQ_STATE_BUSY:
4949 return "BUSY";
4950 case SKD_REQ_STATE_COMPLETED:
4951 return "COMPLETED";
4952 case SKD_REQ_STATE_TIMEOUT:
4953 return "TIMEOUT";
4954 case SKD_REQ_STATE_ABORTED:
4955 return "ABORTED";
4956 default:
4957 return "???";
4958 }
4959}
4960
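/*
 * The skd_log_*() helpers dump device, FIT message and request state to the
 * debug log when an event of interest occurs.
 */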
4961static void skd_log_skdev(struct skd_device *skdev, const char *event)
4962{
Bart Van Asschef98806d2017-08-17 13:12:58 -07004963 dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
4964 dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
4965 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
4966 skd_skdev_state_to_str(skdev->state), skdev->state);
4967 dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
4968 skdev->in_flight, skdev->cur_max_queue_depth,
4969 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
4970 dev_dbg(&skdev->pdev->dev, " timestamp=0x%x cycle=%d cycle_ix=%d\n",
4971 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004972}
4973
4974static void skd_log_skmsg(struct skd_device *skdev,
4975 struct skd_fitmsg_context *skmsg, const char *event)
4976{
Bart Van Asschef98806d2017-08-17 13:12:58 -07004977 dev_dbg(&skdev->pdev->dev, "skmsg=%p event='%s'\n", skmsg, event);
4978 dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x length=%d\n",
4979 skd_skmsg_state_to_str(skmsg->state), skmsg->state, skmsg->id,
4980 skmsg->length);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004981}
4982
4983static void skd_log_skreq(struct skd_device *skdev,
4984 struct skd_request_context *skreq, const char *event)
4985{
Bart Van Asschef98806d2017-08-17 13:12:58 -07004986 dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
4987 dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
4988 skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
4989 skreq->fitmsg_id);
4990 dev_dbg(&skdev->pdev->dev, " timo=0x%x sg_dir=%d n_sg=%d\n",
4991 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004992
Jens Axboefcd37eb2013-11-01 10:14:56 -06004993 if (skreq->req != NULL) {
4994 struct request *req = skreq->req;
4995 u32 lba = (u32)blk_rq_pos(req);
4996 u32 count = blk_rq_sectors(req);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004997
Bart Van Asschef98806d2017-08-17 13:12:58 -07004998 dev_dbg(&skdev->pdev->dev,
4999 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
5000 lba, lba, count, count, (int)rq_data_dir(req));
Jens Axboefcd37eb2013-11-01 10:14:56 -06005001 } else
Bart Van Asschef98806d2017-08-17 13:12:58 -07005002 dev_dbg(&skdev->pdev->dev, "req=NULL\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005003}
5004
5005/*
5006 *****************************************************************************
5007 * MODULE GLUE
5008 *****************************************************************************
5009 */
5010
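/*
 * Module load entry point: validate the module parameters, falling back to
 * the defaults for out-of-range values, then register the PCI driver.
 */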
5011static int __init skd_init(void)
5012{
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005013 pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5014
5015 switch (skd_isr_type) {
5016 case SKD_IRQ_LEGACY:
5017 case SKD_IRQ_MSI:
5018 case SKD_IRQ_MSIX:
5019 break;
5020 default:
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005021 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005022 skd_isr_type, SKD_IRQ_DEFAULT);
5023 skd_isr_type = SKD_IRQ_DEFAULT;
5024 }
5025
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005026 if (skd_max_queue_depth < 1 ||
5027 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5028 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005029 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5030 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5031 }
5032
5033 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005034 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005035 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5036 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5037 }
5038
5039 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005040		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005041 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5042 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5043 }
5044
5045 if (skd_dbg_level < 0 || skd_dbg_level > 2) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005046 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005047 skd_dbg_level, 0);
5048 skd_dbg_level = 0;
5049 }
5050
5051 if (skd_isr_comp_limit < 0) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005052		pr_err(PFX "skd_isr_comp_limit %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005053 skd_isr_comp_limit, 0);
5054 skd_isr_comp_limit = 0;
5055 }
5056
5057 if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005058 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005059 skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5060 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5061 }
5062
Bartlomiej Zolnierkiewiczb8df6642013-11-05 12:37:02 +01005063 return pci_register_driver(&skd_driver);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005064}
5065
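/*
 * Module unload: unregister the PCI driver and, if a block major number was
 * obtained, release it.
 */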
5066static void __exit skd_exit(void)
5067{
5068 pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5069
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005070 pci_unregister_driver(&skd_driver);
Bartlomiej Zolnierkiewiczb8df6642013-11-05 12:37:02 +01005071
5072 if (skd_major)
5073 unregister_blkdev(skd_major, DRV_NAME);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005074}
5075
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005076module_init(skd_init);
5077module_exit(skd_exit);