/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * cmd_checkout() - checks out an AFU command
 * @afu: AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
static struct afu_cmd *cmd_checkout(struct afu *afu)
{
	int k, dec = CXLFLASH_NUM_CMDS;
	struct afu_cmd *cmd;

	while (dec--) {
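		/*
		 * The increment-and-mask below assumes CXLFLASH_NUM_CMDS
		 * is a power of two, yielding a cheap round-robin index
		 * without a modulo.
		 */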
		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

		cmd = &afu->cmd[k];

		if (!atomic_dec_if_positive(&cmd->free)) {
			pr_devel("%s: returning found index=%d cmd=%p\n",
				 __func__, cmd->slot, cmd);
			memset(cmd->buf, 0, CMD_BUFSIZE);
			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
			return cmd;
		}
	}

	return NULL;
}
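
/*
 * A minimal usage sketch (not part of the driver itself): checkout and
 * checkin bracket every command, whether it completes or fails to send.
 *
 *	struct afu_cmd *cmd = cmd_checkout(afu);
 *
 *	if (cmd) {
 *		... populate cmd->rcb ...
 *		if (send_cmd(afu, cmd))
 *			cmd_checkin(cmd);
 *	}
 */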

/**
 * cmd_checkin() - checks in an AFU command
 * @cmd: AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
static void cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}

/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/*
				 * If SISL_RC_FLAGS_OVERRUN was set,
				 * this error is handled elsewhere. If
				 * not, it must be handled here; this
				 * is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
				     "ioasc=%d\n", __func__, scp, scp->result,
				     cmd->sa.ioasc);

		cmd_checkin(cmd); /* Don't use cmd after here */

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - timeout handler for AFU commands
 * @cmd: AFU command that timed out.
 *
 * Sends a reset to the AFU.
 */
static void context_reset(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	u64 room = 0;
	struct afu *afu = cmd->parent;
	ulong lock_flags;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	spin_lock_irqsave(&cmd->slock, lock_flags);

	/* Already completed? */
	if (cmd->sa.host_use_b[0] & B_DONE) {
		spin_unlock_irqrestore(&cmd->slock, lock_flags);
		return;
	}

	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	/*
	 * We really want to send this reset at all costs, so spread
	 * out wait time on successive retries for available room.
	 */
	do {
		room = readq_be(&afu->host_map->cmd_room);
		atomic64_set(&afu->room, room);
		if (room)
			goto write_rrin;
		udelay(nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	pr_err("%s: no cmd_room to send reset\n", __func__);
	return;

write_rrin:
	nretry = 0;
	writeq_be(rrin, &afu->host_map->ioarrin);
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double the delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);
}

/**
 * send_cmd() - sends an AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int nretry = 0;
	int rc = 0;
	u64 room;
	long newval;

	/*
	 * This routine is used by critical users such as AFU sync and
	 * task management function (TMF) dispatch. Thus we want to
	 * retry a bit before returning an error. To avoid the
	 * performance penalty of MMIO, we spread the update of 'room'
	 * over multiple commands.
	 */
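	/*
	 * afu->room acts as a cached credit count.
	 * atomic64_dec_if_positive() returns the post-decrement value:
	 * positive means a credit was claimed, zero means the last
	 * credit was just taken (refresh 'room' via MMIO below), and
	 * negative means no credits were available to begin with.
	 */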
retry:
	newval = atomic64_dec_if_positive(&afu->room);
	if (!newval) {
		do {
			room = readq_be(&afu->host_map->cmd_room);
			atomic64_set(&afu->room, room);
			if (room)
				goto write_ioarrin;
			udelay(nretry);
		} while (nretry++ < MC_ROOM_RETRY_CNT);

		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
			__func__, cmd->rcb.cdb[0]);

		goto no_room;
	} else if (unlikely(newval < 0)) {
		/*
		 * This should be rare, i.e. only if two threads race and
		 * decrement before the MMIO read is done. In this case
		 * just benefit from the other thread having updated
		 * afu->room.
		 */
		if (nretry++ < MC_ROOM_RETRY_CNT) {
			udelay(nretry);
			goto retry;
		}

		goto no_room;
	}

write_ioarrin:
	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;

no_room:
	afu->read_room = true;
	schedule_work(&cfg->work_q);
	rc = SCSI_MLQUEUE_HOST_BUSY;
	goto out;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 */
static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
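	/* rcb.timeout is in seconds; allow up to twice that for slack */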
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		context_reset(cmd);

	if (unlikely(cmd->sa.ioasc != 0))
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu: AFU to checkout from.
 * @scp: SCSI command from stack.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd;

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd;
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		cmd_checkin(cmd); /* release the checked-out command */
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	ncount = scsi_sg_count(scp);
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		scsi_dma_unmap(scp);
	}

out:
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	int i;
	char *buf = NULL;
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			buf = afu->cmd[i].buf;
			if (!((u64)buf & (PAGE_SIZE - 1)))
				free_page((ulong)buf);
		}

		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	int i;
	struct afu *afu = cfg->afu;

	if (likely(afu)) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
			complete(&afu->cmd[i].cevent);

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
			__func__);
		return;
	}

	switch (level) {
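	/* Each case deliberately falls through to undo the levels below it */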
	case UNDO_START:
		rc = cxl_stop_context(cfg->mcctx);
		BUG_ON(rc);
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
	case RELEASE_CONTEXT:
		cfg->mcctx = NULL;
	}
}

/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	term_mc(cfg, UNDO_START);

	if (cfg->afu)
		stop_afu(cfg);

	pr_debug("%s: returning\n", __func__);
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* Fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		cancel_work_sync(&cfg->work_q);
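		/* Fall through */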
	case INIT_STATE_PCI:
		pci_release_regions(cfg->dev);
		pci_disable_device(pdev);
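		/* Fall through */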
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	int i;
	char *buf = NULL;
	struct device *dev = &cfg->dev->dev;

	/*
	 * This allocation is about 12K, i.e. only 1 64K page
	 * and up to 4 4K pages.
	 */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;

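	/*
	 * Carve CMD_BUFSIZE buffers out of whole pages: a fresh page is
	 * allocated whenever the running pointer lands on a page
	 * boundary (which is also how free_mem() knows which buffers
	 * to free). This assumes CMD_BUFSIZE divides PAGE_SIZE evenly.
	 */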
	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
		if (!((u64)buf & (PAGE_SIZE - 1))) {
			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
			if (unlikely(!buf)) {
				dev_err(dev,
					"%s: Allocate command buffers fail!\n",
					__func__);
				rc = -ENOMEM;
				free_mem(cfg);
				goto out;
			}
		}

		cfg->afu->cmd[i].buf = buf;
		atomic_set(&cfg->afu->cmd[i].free, 1);
		cfg->afu->cmd[i].slot = i;
	}

out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
	rc = pci_request_regions(pdev, CXLFLASH_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"%s: Couldn't register memory range of registers\n",
			__func__);
		goto out;
	}

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out_release_regions;
		}
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc < 0) {
		dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
			__func__);
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (rc < 0) {
		dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
			__func__);
		goto out_disable;
	}

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		cxlflash_wait_for_pci_err_recovery(cfg);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
			__func__);
		rc = -EIO;
		goto cleanup_nolog;
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

cleanup_nolog:
out_msi_disable:
	cxlflash_wait_for_pci_err_recovery(cfg);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	goto out;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn: The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return:
 *	0 when the WWPN is successfully written and the port comes back online
 *	-1 when the port fails to go offline or come back up online
 */
static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			u64 wwpn)
{
	int rc = 0;

	set_port_offline(fc_regs);

	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
		rc = -1; /* but continue on to leave the port back online */
	}

	if (rc == 0)
		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	/* Always return success after programming WWPN */
	rc = 0;

	set_port_online(fc_regs);

	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);

	return rc;
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place. A failure
 * to come online is overridden.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status: Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu: AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/*
	 * Global async interrupts: the AFU clears afu_ctrl on context
	 * exit if async interrupts were sent to that context. This
	 * prevents the AFU from sending further async interrupts when
	 * there is nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process however many RRQ entries that are ready */
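	/*
	 * Each RRQ entry carries a toggle bit; the host flips its
	 * expected value every time the queue wraps, so an entry is
	 * new only while its toggle bit matches. This avoids any
	 * shared producer index between the AFU and the host.
	 */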
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* it is OK to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg: Internal structure associated with the host.
 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->parent_dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
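		/* WWPN_LEN (16) conveniently doubles as the base: hex */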
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg: Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	/* ctx_hndl is 16 bits in CAIA */
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}

/**
 * init_global() - initialize AFU global registers
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);

	/* set up RRQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* global port select: select either port */
	if (afu->internal_lun) {
		/* only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0 &&
		    afu_set_wwpn(afu, i,
				 &afu->afu_map->global.fc_regs[i][0],
				 wwpn[i])) {
			dev_err(dev, "%s: failed to set WWPN on port %d\n",
				__func__, i);
			rc = -EIO;
			goto out;
		}
		/*
		 * Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* set up master's own CTX_CAP to allow real mode, host translation */
	/* tbls, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* init heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);

out:
	return rc;
}
1583
1584/**
1585 * start_afu() - initializes and starts the AFU
Matthew R. Ochs1284fb02015-10-21 15:14:40 -05001586 * @cfg: Internal structure associated with the host.
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001587 */
1588static int start_afu(struct cxlflash_cfg *cfg)
1589{
1590 struct afu *afu = cfg->afu;
1591 struct afu_cmd *cmd;
1592
1593 int i = 0;
1594 int rc = 0;
1595
1596 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1597 cmd = &afu->cmd[i];
1598
1599 init_completion(&cmd->cevent);
1600 spin_lock_init(&cmd->slock);
1601 cmd->parent = afu;
1602 }
1603
1604 init_pcr(cfg);
1605
1606 /* initialize RRQ pointers */
1607 afu->hrrq_start = &afu->rrq_entry[0];
1608 afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
1609 afu->hrrq_curr = afu->hrrq_start;
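	/* The toggle is matched against the valid bit of each RRQ entry
	 * and flips when the queue wraps, distinguishing stale entries.
	 */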
	afu->toggle = 1;

	rc = init_global(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_mc() - create and register as the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx))
		return -ENOMEM;
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

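	/* The master context uses three AFU interrupts: sync error,
	 * RRQ (response queue) updated and async error.
	 */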
	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
			__func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
			__func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
			__func__);
		level = UNMAP_TWO;
		goto out;
	}

	rc = 0;

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_mc(cfg, level);
	goto ret;
}

/**
 * init_afu() - setup as master context and start AFU
 * @cfg: Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
			__func__, rc);
		goto err1;
	}

	/* Map the entire MMIO space of the AFU.
	 */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		rc = -ENOMEM;
		term_mc(cfg, UNDO_START);
		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
		goto err1;
	}

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
		readq_be(&afu->afu_map->global.regs.interface_version);
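	/* An interface version of all ones (adding 1 wraps to zero)
	 * identifies a back level AFU.
	 */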
	if ((afu->interface_version + 1) == 0) {
		pr_err("Back level AFU, please upgrade. AFU version %s "
		       "interface version 0x%llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err1;
	} else
		pr_debug("%s: afu version %s, interface version 0x%llX\n",
			 __func__, afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
			__func__, rc);
		term_mc(cfg, UNDO_START);
		cxl_psa_unmap((void __iomem *)afu->afu_map);
		afu->afu_map = NULL;
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
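	/* Prime the cached command room; the worker thread refreshes it
	 * whenever the send path finds it exhausted.
	 */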
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
err1:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu: AFU associated with the host.
 * @ctx_hndl_u: Identifies context requesting sync.
 * @res_hndl_u: Identifies resource requesting sync.
 * @mode: Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success
 *	-1 on failure
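 *
 * Illustrative example (the mode constants, e.g. AFU_LW_SYNC, live in
 * sislite.h); a caller flushing state for a single resource handle
 * might issue:
 *
 *	cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);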
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	int rc = 0;
	int retry_cnt = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
retry:
	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		retry_cnt++;
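		/* Busy-wait with a linear backoff (retry_cnt milliseconds);
		 * per the note above, callers are never on interrupt context.
		 */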
		udelay(1000 * retry_cnt);
		if (retry_cnt < MC_RETRY_CNT)
			goto retry;
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = -1;
		goto out;
	}

	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.port_sel = 0x0;	/* NA */
	cmd->rcb.lun_id = 0x0;	/* NA */
	cmd->rcb.data_len = 0x0;
	cmd->rcb.data_ea = 0x0;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	wait_resp(afu, cmd);

	/* set on timeout */
	if (unlikely((cmd->sa.ioasc != 0) ||
		     (cmd->sa.host_use_b[0] & B_ERROR)))
		rc = -1;
out:
	mutex_unlock(&sync_active);
	if (cmd)
		cmd_checkin(cmd);
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * afu_reset() - resets the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	/* Stop the context before the reset. Since the context is
	 * no longer available restart it after the reset is complete
	 */

	term_afu(cfg);

	rc = init_afu(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp: SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp: SCSI command from stack identifying host.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev: SCSI device destined for queue depth change.
 * @qdepth: Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{

	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
{
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_regs;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_regs = &afu->afu_map->global.fc_regs[port][0];
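	/* The FC_* register defines are byte offsets; divide by 8 to
	 * index the array of 64-bit registers.
	 */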
	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}

/**
 * port0_show() - queries and presents the current status of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(0, afu, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(1, afu, buf);
}

/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count: Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
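 * Illustrative example (the SCSI host number varies by system):
 *
 *	echo 1 > /sys/class/scsi_host/host0/lun_mode
 *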
 * Return: The number of bytes consumed from @buf (always @count).
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;
		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}

/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the ioctl version.
 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}

/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct afu *afu,
					    char *buf)
{
	int i;
	ssize_t bytes = 0;
	__be64 __iomem *fc_port;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_port = &afu->afu_map->global.fc_port[port][0];

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
	return bytes;
}

/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(0, afu, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(1, afu, buf);
}

/**
 * mode_show() - presents the current mode of the device
 * @dev: Generic device associated with the device.
 * @attr: Device attribute representing the device mode.
 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}

/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};

/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = CXLFLASH_MAX_CMDS,
	.this_id = -1,
	.sg_tablesize = SG_NONE,	/* No scatter gather support. */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);

/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work: Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Read AFU command room
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			afu_link_reset(afu, port,
				       &afu->afu_map->
				       global.fc_regs[port][0]);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	if (afu->read_room) {
		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
		afu->read_room = false;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

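	/* scan_host_needed counts pending rescan requests raised elsewhere
	 * in the driver; consume one atomically before scanning.
	 */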
	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}

/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev: PCI device associated with the host.
 * @dev_id: PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *phys_dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
			__func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = NUM_FC_PORTS - 1;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = (struct cxlflash_cfg *)host->hostdata;
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
			__func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;

	/*
	 * The promoted LUNs move to the top of the LUN table. The rest stay
	 * on the bottom half. The bottom half grows from the end
	 * (index = 255), whereas the top half grows from the beginning
	 * (index = 0).
	 */
	cfg->promote_lun_index = 0;
	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;
	cfg->mcctx = NULL;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	/* Use the special service provided to look up the physical
	 * PCI device, since we are called on the probe of the virtual
	 * PCI host bus (vphb)
	 */
	phys_dev = cxl_get_phys_dev(pdev);
	if (!dev_is_pci(phys_dev)) {
		dev_err(&pdev->dev, "%s: not a pci dev\n", __func__);
		rc = -ENODEV;
		goto out_remove;
	}
	cfg->parent_dev = to_pci_dev(phys_dev);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_pci "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_afu "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_scsi "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}

/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg: Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}

/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev: PCI device struct.
 * @state: PCI channel state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
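		/* Quiesce in order: fence off new I/O, drain ioctls already
		 * in flight, mark user contexts in error, then tear down the
		 * master context and stop the AFU.
		 */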
		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
				__func__, rc);
		term_mc(cfg, UNDO_START);
		stop_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev: PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev: PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}

static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};

/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};

/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	pr_info("%s: IBM Power CXL Flash Adapter: %s\n",
		__func__, CXLFLASH_DRIVER_DATE);

	cxlflash_list_init();

	return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);