/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _CXLFLASH_COMMON_H
#define _CXLFLASH_COMMON_H

#include <linux/irq_poll.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

extern const struct file_operations cxlflash_cxl_fops;

#define MAX_CONTEXT	CXLFLASH_MAX_CONTEXT	/* num contexts per afu */
#define NUM_FC_PORTS	CXLFLASH_NUM_FC_PORTS	/* ports per AFU */
#define MAX_FC_PORTS	CXLFLASH_MAX_FC_PORTS	/* ports per AFU */

#define CHAN2PORTMASK(_x)	(1 << (_x))	/* channel to port mask */
#define PORTMASK2CHAN(_x)	(ilog2((_x)))	/* port mask to channel */
#define PORTNUM2CHAN(_x)	((_x) - 1)	/* port number to channel */
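
/*
 * Worked example (illustrative only): channel 2 corresponds to port mask
 * CHAN2PORTMASK(2) == 0x4, and PORTMASK2CHAN(0x4) == 2 recovers the channel.
 * Port numbers are 1-based, so PORTNUM2CHAN(3) == 2 is the 0-based channel.
 */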

#define CXLFLASH_BLOCK_SIZE	4096		/* 4K blocks */
#define CXLFLASH_MAX_XFER_SIZE	16777216	/* 16MB transfer */

/* SCSI wants max_sectors in units of 512 byte sectors */
#define CXLFLASH_MAX_SECTORS	(CXLFLASH_MAX_XFER_SIZE/512)
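
/*
 * For reference, CXLFLASH_MAX_SECTORS evaluates to 16777216 / 512 == 32768,
 * i.e. a 16MB maximum transfer expressed in 512 byte sectors.
 */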

#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))

/* AFU command retry limit, sufficient for SCSI check and certain AFU errors */
#define MC_RETRY_CNT	5

/* Command management definitions */

/* Must be a pow2 for alignment and more efficient array index derivation */
#define CXLFLASH_NUM_CMDS		(2 * CXLFLASH_MAX_CMDS)

#define CXLFLASH_MAX_CMDS		256
#define CXLFLASH_MAX_CMDS_PER_LUN	CXLFLASH_MAX_CMDS

/* RRQ for master issued cmds */
#define NUM_RRQ_ENTRY			CXLFLASH_MAX_CMDS

/* SQ for master issued cmds */
#define NUM_SQ_ENTRY			CXLFLASH_MAX_CMDS

static inline void check_sizes(void)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_CMDS);
}
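
/*
 * Sketch (not part of the driver): because CXLFLASH_NUM_CMDS is a power of
 * two, a command slot can be derived from a running counter or tag with a
 * mask rather than a modulo, e.g. for a hypothetical tag value:
 *
 *	slot = tag & (CXLFLASH_NUM_CMDS - 1);
 *
 * This is the "more efficient array index derivation" referred to in the
 * comment on CXLFLASH_NUM_CMDS above.
 */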

/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
#define CMD_BUFSIZE	SIZE_4K

enum cxlflash_lr_state {
	LINK_RESET_INVALID,
	LINK_RESET_REQUIRED,
	LINK_RESET_COMPLETE
};

enum cxlflash_init_state {
	INIT_STATE_NONE,
	INIT_STATE_PCI,
	INIT_STATE_AFU,
	INIT_STATE_SCSI
};

enum cxlflash_state {
	STATE_NORMAL,	/* Normal running state, everything good */
	STATE_RESET,	/* Reset state, trying to reset/recover */
	STATE_FAILTERM	/* Failed/terminating state, error out users/threads */
};

/*
 * Each context has its own set of resource handles that is visible
 * only from that context.
 */

struct cxlflash_cfg {
	struct afu *afu;
	struct cxl_context *mcctx;

	struct pci_dev *dev;
	struct pci_device_id *dev_id;
	struct Scsi_Host *host;
	int num_fc_ports;

	ulong cxlflash_regs_pci;

	struct work_struct work_q;
	enum cxlflash_init_state init_state;
	enum cxlflash_lr_state lr_state;
	int lr_port;
	atomic_t scan_host_needed;

	struct cxl_afu *cxl_afu;

	atomic_t recovery_threads;
	struct mutex ctx_recovery_mutex;
	struct mutex ctx_tbl_list_mutex;
	struct rw_semaphore ioctl_rwsem;
	struct ctx_info *ctx_tbl[MAX_CONTEXT];
	struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
	struct file_operations cxl_fops;

	/* Parameters that are LUN table related */
	int last_lun_index[MAX_FC_PORTS];
	int promote_lun_index;
	struct list_head lluns; /* list of llun_info structs */

	wait_queue_head_t tmf_waitq;
	spinlock_t tmf_slock;
	bool tmf_active;
	wait_queue_head_t reset_waitq;
	enum cxlflash_state state;
};

struct afu_cmd {
	struct sisl_ioarcb rcb;	/* IOARCB (cache line aligned) */
	struct sisl_ioasa sa;	/* IOASA must follow IOARCB */
	struct afu *parent;
	struct scsi_cmnd *scp;
	struct completion cevent;
	struct list_head queue;

	u8 cmd_tmf:1;

	/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
	 * However for performance reasons the IOARCB/IOASA should be
	 * cache line aligned.
	 */
} __aligned(cache_line_size());

static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
{
	return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
}

static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
{
	struct afu_cmd *afuc = sc_to_afuc(sc);

	memset(afuc, 0, sizeof(*afuc));
	return afuc;
}
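
/*
 * Usage sketch (illustrative, not lifted from the driver sources): a
 * queuecommand-style path would typically start from the zeroed,
 * alignment-adjusted private area of the scsi_cmnd:
 *
 *	struct afu_cmd *cmd = sc_to_afucz(scp);
 *
 *	cmd->scp = scp;
 *	cmd->parent = afu;
 *
 * sc_to_afuc() returns the same storage without clearing it, for callers
 * that only need to locate an already-initialized command.
 */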

struct afu {
	/* Stuff requiring alignment goes first. */
	struct sisl_ioarcb sq[NUM_SQ_ENTRY];		/* 16K SQ */
	u64 rrq_entry[NUM_RRQ_ENTRY];			/* 2K RRQ */

	/* Beware of alignment till here. Preferably introduce new
	 * fields after this point.
	 */

	int (*send_cmd)(struct afu *, struct afu_cmd *);
	void (*context_reset)(struct afu_cmd *);

	/* AFU HW */
	struct cxl_ioctl_start_work work;
	struct cxlflash_afu_map __iomem *afu_map;	/* entire MMIO map */
	struct sisl_host_map __iomem *host_map;		/* MC host map */
	struct sisl_ctrl_map __iomem *ctrl_map;		/* MC control map */

	ctx_hndl_t ctx_hndl;	/* master's context handle */

	atomic_t hsq_credits;
	spinlock_t hsq_slock;
	struct sisl_ioarcb *hsq_start;
	struct sisl_ioarcb *hsq_end;
	struct sisl_ioarcb *hsq_curr;
	spinlock_t hrrq_slock;
	u64 *hrrq_start;
	u64 *hrrq_end;
	u64 *hrrq_curr;
	bool toggle;
	atomic_t cmds_active;	/* Number of currently active AFU commands */
	s64 room;
	spinlock_t rrin_slock;	/* Lock to rrin queuing and cmd_room updates */
	u64 hb;
	u32 internal_lun;	/* User-desired LUN mode for this AFU */

	char version[16];
	u64 interface_version;

	u32 irqpoll_weight;
	struct irq_poll irqpoll;
	struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
};

static inline bool afu_is_irqpoll_enabled(struct afu *afu)
{
	return !!afu->irqpoll_weight;
}

static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
{
	u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;

	return afu_cap & cmd_mode;
}

static inline bool afu_is_sq_cmd_mode(struct afu *afu)
{
	return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
}

static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
{
	return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
}
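
/*
 * Usage sketch (illustrative only; the helper names below are placeholders):
 * the capability bits let initialization code pick the queuing method the
 * AFU supports, e.g.:
 *
 *	if (afu_is_sq_cmd_mode(afu))
 *		afu->send_cmd = send_cmd_sq;
 *	else if (afu_is_ioarrin_cmd_mode(afu))
 *		afu->send_cmd = send_cmd_ioarrin;
 */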

static inline u64 lun_to_lunid(u64 lun)
{
	__be64 lun_id;

	int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
	return be64_to_cpu(lun_id);
}

static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i)
{
	struct afu *afu = cfg->afu;

	return &afu->afu_map->global.fc_regs[i][0];
}

static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i)
{
	struct afu *afu = cfg->afu;

	return &afu->afu_map->global.fc_port[i][0];
}
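
/*
 * Usage sketch (illustrative only): per-port register and LUN table areas
 * are normally walked by 0-based port index up to cfg->num_fc_ports, e.g.:
 *
 *	for (i = 0; i < cfg->num_fc_ports; i++) {
 *		__be64 __iomem *fc_regs = get_fc_port_regs(cfg, i);
 *		...
 *	}
 */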

int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
void cxlflash_list_init(void);
void cxlflash_term_global_luns(void);
void cxlflash_free_errpage(void);
int cxlflash_ioctl(struct scsi_device *, int, void __user *);
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *);
int cxlflash_mark_contexts_error(struct cxlflash_cfg *);
void cxlflash_term_local_luns(struct cxlflash_cfg *);
void cxlflash_restore_luntable(struct cxlflash_cfg *);

#endif /* ifndef _CXLFLASH_COMMON_H */