/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _CXLFLASH_COMMON_H
#define _CXLFLASH_COMMON_H

#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

extern const struct file_operations cxlflash_cxl_fops;

#define MAX_CONTEXT	CXLFLASH_MAX_CONTEXT	/* num contexts per afu */

#define CXLFLASH_BLOCK_SIZE	4096		/* 4K blocks */
#define CXLFLASH_MAX_XFER_SIZE	16777216	/* 16MB transfer */
#define CXLFLASH_MAX_SECTORS	(CXLFLASH_MAX_XFER_SIZE/512)	/* SCSI wants
								   max_sectors
								   in units of
								   512 byte
								   sectors
								 */

#define MAX_RHT_PER_CONTEXT	(PAGE_SIZE / sizeof(struct sisl_rht_entry))

/* AFU command retry limit */
#define MC_RETRY_CNT	5	/* sufficient for SCSI check and
				   certain AFU errors */

/* Command management definitions */
#define CXLFLASH_NUM_CMDS	(2 * CXLFLASH_MAX_CMDS)	/* Must be a pow2 for
							   alignment and more
							   efficient array
							   index derivation
							 */

#define CXLFLASH_MAX_CMDS		256
#define CXLFLASH_MAX_CMDS_PER_LUN	CXLFLASH_MAX_CMDS

/* RRQ for master issued cmds */
#define NUM_RRQ_ENTRY			CXLFLASH_MAX_CMDS

/* SQ for master issued cmds */
#define NUM_SQ_ENTRY			CXLFLASH_MAX_CMDS

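/*
 * Compile-time check that the command pool size is a power of two, which the
 * CXLFLASH_NUM_CMDS comment above requires for cheap array index derivation.
 */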
static inline void check_sizes(void)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_CMDS);
}

/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
#define CMD_BUFSIZE	SIZE_4K

enum cxlflash_lr_state {
	LINK_RESET_INVALID,
	LINK_RESET_REQUIRED,
	LINK_RESET_COMPLETE
};

enum cxlflash_init_state {
	INIT_STATE_NONE,
	INIT_STATE_PCI,
	INIT_STATE_AFU,
	INIT_STATE_SCSI
};

enum cxlflash_state {
	STATE_NORMAL,	/* Normal running state, everything good */
	STATE_RESET,	/* Reset state, trying to reset/recover */
	STATE_FAILTERM	/* Failed/terminating state, error out users/threads */
};

/*
 * Each context has its own set of resource handles that is visible
 * only from that context.
 */

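/* Per-adapter driver state (PCI function, SCSI host and AFU) */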
struct cxlflash_cfg {
	struct afu *afu;
	struct cxl_context *mcctx;

	struct pci_dev *dev;
	struct pci_device_id *dev_id;
	struct Scsi_Host *host;

	ulong cxlflash_regs_pci;

	struct work_struct work_q;
	enum cxlflash_init_state init_state;
	enum cxlflash_lr_state lr_state;
	int lr_port;
	atomic_t scan_host_needed;

	struct cxl_afu *cxl_afu;

	atomic_t recovery_threads;
	struct mutex ctx_recovery_mutex;
	struct mutex ctx_tbl_list_mutex;
	struct rw_semaphore ioctl_rwsem;
	struct ctx_info *ctx_tbl[MAX_CONTEXT];
	struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
	struct file_operations cxl_fops;

	/* Parameters that are LUN table related */
	int last_lun_index[CXLFLASH_NUM_FC_PORTS];
	int promote_lun_index;
	struct list_head lluns; /* list of llun_info structs */

	wait_queue_head_t tmf_waitq;
	spinlock_t tmf_slock;
	bool tmf_active;
	wait_queue_head_t reset_waitq;
	enum cxlflash_state state;
};

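/*
 * Per-command state. Commands issued on behalf of the SCSI midlayer live in
 * the SCSI command private area and are recovered via sc_to_afuc() below.
 */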
struct afu_cmd {
	struct sisl_ioarcb rcb;	/* IOARCB (cache line aligned) */
	struct sisl_ioasa sa;	/* IOASA must follow IOARCB */
	struct afu *parent;
	struct scsi_cmnd *scp;
	struct completion cevent;
	struct list_head queue;

	u8 cmd_tmf:1;

	/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
	 * However for performance reasons the IOARCB/IOASA should be
	 * cache line aligned.
	 */
} __aligned(cache_line_size());

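/* Locate the AFU command within the private area of a SCSI command */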
static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
{
	return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
}

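/* Same as sc_to_afuc(), but return the AFU command zeroed */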
static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
{
	struct afu_cmd *afuc = sc_to_afuc(sc);

	memset(afuc, 0, sizeof(*afuc));
	return afuc;
}

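/* Per-AFU state owned by the master context: queues, MMIO maps, capabilities */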
struct afu {
	/* Stuff requiring alignment goes first. */
	struct sisl_ioarcb sq[NUM_SQ_ENTRY];	/* 16K SQ */
	u64 rrq_entry[NUM_RRQ_ENTRY];		/* 2K RRQ */

	/* Beware of alignment till here. Preferably introduce new
	 * fields after this point.
	 */

	int (*send_cmd)(struct afu *, struct afu_cmd *);
	void (*context_reset)(struct afu_cmd *);

	/* AFU HW */
	struct cxl_ioctl_start_work work;
	struct cxlflash_afu_map __iomem *afu_map;	/* entire MMIO map */
	struct sisl_host_map __iomem *host_map;		/* MC host map */
	struct sisl_ctrl_map __iomem *ctrl_map;		/* MC control map */

	ctx_hndl_t ctx_hndl;	/* master's context handle */

	atomic_t hsq_credits;
	spinlock_t hsq_slock;
	struct sisl_ioarcb *hsq_start;
	struct sisl_ioarcb *hsq_end;
	struct sisl_ioarcb *hsq_curr;
	spinlock_t hrrq_slock;
	u64 *hrrq_start;
	u64 *hrrq_end;
	u64 *hrrq_curr;
	bool toggle;
	atomic_t cmds_active;	/* Number of currently active AFU commands */
	s64 room;
	spinlock_t rrin_slock;	/* Lock to rrin queuing and cmd_room updates */
	u64 hb;
	u32 internal_lun;	/* User-desired LUN mode for this AFU */

	char version[16];
	u64 interface_version;

	struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
};

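/*
 * Command-mode capability helpers: the bits above SISL_INTVER_CAP_SHIFT in
 * the AFU interface version register advertise which of the SQ and IOARRIN
 * command submission modes the AFU supports.
 */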
static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
{
	u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;

	return afu_cap & cmd_mode;
}

static inline bool afu_is_sq_cmd_mode(struct afu *afu)
{
	return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
}

static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
{
	return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
}

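/* Encode a LUN number in SCSI LUN format, returned as a native-endian u64 */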
static inline u64 lun_to_lunid(u64 lun)
{
	__be64 lun_id;

	int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
	return be64_to_cpu(lun_id);
}

int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
void cxlflash_list_init(void);
void cxlflash_term_global_luns(void);
void cxlflash_free_errpage(void);
int cxlflash_ioctl(struct scsi_device *, int, void __user *);
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *);
int cxlflash_mark_contexts_error(struct cxlflash_cfg *);
void cxlflash_term_local_luns(struct cxlflash_cfg *);
void cxlflash_restore_luntable(struct cxlflash_cfg *);

#endif /* ifndef _CXLFLASH_COMMON_H */