Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/delay.h>
16#include <linux/list.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19
20#include <asm/unaligned.h>
21
22#include <misc/cxl.h>
23
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_host.h>
Matthew R. Ochs65be2c72015-08-13 21:47:43 -050026#include <uapi/scsi/cxlflash_ioctl.h>
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -050027
28#include "main.h"
29#include "sislite.h"
30#include "common.h"
31
32MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
33MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
34MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
35MODULE_LICENSE("GPL");
36
37
38/**
Matthew R. Ochs15305512015-10-21 15:12:10 -050039 * cmd_checkout() - checks out an AFU command
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -050040 * @afu: AFU to checkout from.
41 *
42 * Commands are checked out in a round-robin fashion. Note that since
43 * the command pool is larger than the hardware queue, the majority of
44 * times we will only loop once or twice before getting a command. The
45 * buffer and CDB within the command are initialized (zeroed) prior to
46 * returning.
47 *
48 * Return: The checked out command or NULL when command pool is empty.
49 */
Matthew R. Ochs15305512015-10-21 15:12:10 -050050static struct afu_cmd *cmd_checkout(struct afu *afu)
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -050051{
52 int k, dec = CXLFLASH_NUM_CMDS;
53 struct afu_cmd *cmd;
54
55 while (dec--) {
56 k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
57
58 cmd = &afu->cmd[k];
59
60 if (!atomic_dec_if_positive(&cmd->free)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -050061 pr_devel("%s: returning found index=%d cmd=%p\n",
62 __func__, cmd->slot, cmd);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -050063 memset(cmd->buf, 0, CMD_BUFSIZE);
64 memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
65 return cmd;
66 }
67 }
68
69 return NULL;
70}
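
/*
 * Editorial note, not part of the original source: the checkout loop above
 * relies on CXLFLASH_NUM_CMDS being a power of two so that
 * (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1)) behaves like a modulo without
 * a divide. As a purely illustrative example, if CXLFLASH_NUM_CMDS were 16,
 * successive checkouts would start their scan at slots 0, 1, ... 15, 0, ...
 * as the 64-bit counter wraps through its low four bits, spreading contention
 * for cmd->free across the pool.
 */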
71
72/**
Matthew R. Ochs15305512015-10-21 15:12:10 -050073 * cmd_checkin() - checks in an AFU command
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -050074 * @cmd: AFU command to checkin.
75 *
76 * Safe to pass commands that have already been checked in. Several
77 * internal tracking fields are reset as part of the checkin. Note
78 * that these are intentionally reset prior to toggling the free bit
79 * to avoid clobbering values in the event that the command is checked
80 * out right away.
81 */
Matthew R. Ochs15305512015-10-21 15:12:10 -050082static void cmd_checkin(struct afu_cmd *cmd)
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -050083{
84 cmd->rcb.scp = NULL;
85 cmd->rcb.timeout = 0;
86 cmd->sa.ioasc = 0;
87 cmd->cmd_tmf = false;
88 cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
89
90 if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
91 pr_err("%s: Freeing cmd (%d) that is not in use!\n",
92 __func__, cmd->slot);
93 return;
94 }
95
Matthew R. Ochs4392ba42015-10-21 15:13:11 -050096 pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -050097}
98
99/**
100 * process_cmd_err() - command error handler
101 * @cmd: AFU command that experienced the error.
102 * @scp: SCSI command associated with the AFU command in error.
103 *
104 * Translates error bits from AFU command to SCSI command results.
105 */
106static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
107{
108 struct sisl_ioarcb *ioarcb;
109 struct sisl_ioasa *ioasa;
110
111 if (unlikely(!cmd))
112 return;
113
114 ioarcb = &(cmd->rcb);
115 ioasa = &(cmd->sa);
116
117 if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
118 pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
119 __func__, cmd, scp);
120 scp->result = (DID_ERROR << 16);
121 }
122
123 if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
 124 pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
125 __func__, cmd, scp);
126 scp->result = (DID_ERROR << 16);
127 }
128
129 pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500130 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500131 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
132 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
133 ioasa->fc_extra);
134
135 if (ioasa->rc.scsi_rc) {
136 /* We have a SCSI status */
137 if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
138 memcpy(scp->sense_buffer, ioasa->sense_data,
139 SISL_SENSE_DATA_LEN);
140 scp->result = ioasa->rc.scsi_rc;
141 } else
142 scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
143 }
144
145 /*
146 * We encountered an error. Set scp->result based on nature
147 * of error.
148 */
149 if (ioasa->rc.fc_rc) {
150 /* We have an FC status */
151 switch (ioasa->rc.fc_rc) {
152 case SISL_FC_RC_LINKDOWN:
153 scp->result = (DID_REQUEUE << 16);
154 break;
155 case SISL_FC_RC_RESID:
156 /* This indicates an FCP resid underrun */
157 if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
158 /* If the SISL_RC_FLAGS_OVERRUN flag was set,
 159 * then this error is handled elsewhere.
 160 * If not, it must be handled here; this is
 161 * probably an AFU bug, so attempt a retry to
 162 * see if that resolves it.
163 */
164 scp->result = (DID_ERROR << 16);
165 }
166 break;
167 case SISL_FC_RC_RESIDERR:
168 /* Resid mismatch between adapter and device */
169 case SISL_FC_RC_TGTABORT:
170 case SISL_FC_RC_ABORTOK:
171 case SISL_FC_RC_ABORTFAIL:
172 case SISL_FC_RC_NOLOGI:
173 case SISL_FC_RC_ABORTPEND:
174 case SISL_FC_RC_WRABORTPEND:
175 case SISL_FC_RC_NOEXP:
176 case SISL_FC_RC_INUSE:
177 scp->result = (DID_ERROR << 16);
178 break;
179 }
180 }
181
182 if (ioasa->rc.afu_rc) {
183 /* We have an AFU error */
184 switch (ioasa->rc.afu_rc) {
185 case SISL_AFU_RC_NO_CHANNELS:
186 scp->result = (DID_MEDIUM_ERROR << 16);
187 break;
188 case SISL_AFU_RC_DATA_DMA_ERR:
189 switch (ioasa->afu_extra) {
190 case SISL_AFU_DMA_ERR_PAGE_IN:
191 /* Retry */
192 scp->result = (DID_IMM_RETRY << 16);
193 break;
194 case SISL_AFU_DMA_ERR_INVALID_EA:
195 default:
196 scp->result = (DID_ERROR << 16);
197 }
198 break;
199 case SISL_AFU_RC_OUT_OF_DATA_BUFS:
200 /* Retry */
201 scp->result = (DID_ALLOC_FAILURE << 16);
202 break;
203 default:
204 scp->result = (DID_ERROR << 16);
205 }
206 }
207}
208
209/**
210 * cmd_complete() - command completion handler
211 * @cmd: AFU command that has completed.
212 *
 213 * Prepares and submits a command that has either completed or timed out to
 214 * the SCSI stack. Checks the AFU command back into the command pool for
 215 * non-internal (rcb.scp populated) commands.
216 */
217static void cmd_complete(struct afu_cmd *cmd)
218{
219 struct scsi_cmnd *scp;
220 u32 resid;
221 ulong lock_flags;
222 struct afu *afu = cmd->parent;
223 struct cxlflash_cfg *cfg = afu->parent;
224 bool cmd_is_tmf;
225
226 spin_lock_irqsave(&cmd->slock, lock_flags);
227 cmd->sa.host_use_b[0] |= B_DONE;
228 spin_unlock_irqrestore(&cmd->slock, lock_flags);
229
230 if (cmd->rcb.scp) {
231 scp = cmd->rcb.scp;
232 if (unlikely(cmd->sa.rc.afu_rc ||
233 cmd->sa.rc.scsi_rc ||
234 cmd->sa.rc.fc_rc))
235 process_cmd_err(cmd, scp);
236 else
237 scp->result = (DID_OK << 16);
238
239 resid = cmd->sa.resid;
240 cmd_is_tmf = cmd->cmd_tmf;
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500241 pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
 242 "ioasc=%d\n", __func__, scp, scp->result,
 243 cmd->sa.ioasc);
Matthew R. Ochs15305512015-10-21 15:12:10 -0500244 cmd_checkin(cmd); /* Don't use cmd after here */
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500246
247 scsi_set_resid(scp, resid);
248 scsi_dma_unmap(scp);
249 scp->scsi_done(scp);
250
251 if (cmd_is_tmf) {
252 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
253 cfg->tmf_active = false;
254 wake_up_all_locked(&cfg->tmf_waitq);
255 spin_unlock_irqrestore(&cfg->tmf_waitq.lock,
256 lock_flags);
257 }
258 } else
259 complete(&cmd->cevent);
260}
261
262/**
Matthew R. Ochs15305512015-10-21 15:12:10 -0500263 * context_reset() - timeout handler for AFU commands
264 * @cmd: AFU command that timed out.
265 *
266 * Sends a reset to the AFU.
267 */
268static void context_reset(struct afu_cmd *cmd)
269{
270 int nretry = 0;
271 u64 rrin = 0x1;
272 u64 room = 0;
273 struct afu *afu = cmd->parent;
274 ulong lock_flags;
275
276 pr_debug("%s: cmd=%p\n", __func__, cmd);
277
278 spin_lock_irqsave(&cmd->slock, lock_flags);
279
280 /* Already completed? */
281 if (cmd->sa.host_use_b[0] & B_DONE) {
282 spin_unlock_irqrestore(&cmd->slock, lock_flags);
283 return;
284 }
285
286 cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
287 spin_unlock_irqrestore(&cmd->slock, lock_flags);
288
289 /*
290 * We really want to send this reset at all costs, so spread
291 * out wait time on successive retries for available room.
292 */
293 do {
294 room = readq_be(&afu->host_map->cmd_room);
295 atomic64_set(&afu->room, room);
296 if (room)
297 goto write_rrin;
298 udelay(nretry);
299 } while (nretry++ < MC_ROOM_RETRY_CNT);
300
301 pr_err("%s: no cmd_room to send reset\n", __func__);
302 return;
303
304write_rrin:
305 nretry = 0;
306 writeq_be(rrin, &afu->host_map->ioarrin);
307 do {
308 rrin = readq_be(&afu->host_map->ioarrin);
309 if (rrin != 0x1)
310 break;
311 /* Double delay each time */
 312 udelay(1 << nretry);
313 } while (nretry++ < MC_ROOM_RETRY_CNT);
314}
315
316/**
317 * send_cmd() - sends an AFU command
318 * @afu: AFU associated with the host.
319 * @cmd: AFU command to send.
320 *
321 * Return:
322 * 0 on success or SCSI_MLQUEUE_HOST_BUSY
323 */
324static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
325{
326 struct cxlflash_cfg *cfg = afu->parent;
327 struct device *dev = &cfg->dev->dev;
328 int nretry = 0;
329 int rc = 0;
330 u64 room;
331 long newval;
332
333 /*
 334 * This routine is used by critical users such as AFU sync and to
335 * send a task management function (TMF). Thus we want to retry a
336 * bit before returning an error. To avoid the performance penalty
337 * of MMIO, we spread the update of 'room' over multiple commands.
338 */
339retry:
340 newval = atomic64_dec_if_positive(&afu->room);
341 if (!newval) {
342 do {
343 room = readq_be(&afu->host_map->cmd_room);
344 atomic64_set(&afu->room, room);
345 if (room)
346 goto write_ioarrin;
347 udelay(nretry);
348 } while (nretry++ < MC_ROOM_RETRY_CNT);
349
350 dev_err(dev, "%s: no cmd_room to send 0x%X\n",
351 __func__, cmd->rcb.cdb[0]);
352
353 goto no_room;
354 } else if (unlikely(newval < 0)) {
355 /* This should be rare. i.e. Only if two threads race and
356 * decrement before the MMIO read is done. In this case
357 * just benefit from the other thread having updated
358 * afu->room.
359 */
360 if (nretry++ < MC_ROOM_RETRY_CNT) {
361 udelay(nretry);
362 goto retry;
363 }
364
365 goto no_room;
366 }
367
368write_ioarrin:
369 writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
370out:
371 pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
372 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
373 return rc;
374
375no_room:
376 afu->read_room = true;
377 schedule_work(&cfg->work_q);
378 rc = SCSI_MLQUEUE_HOST_BUSY;
379 goto out;
380}
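
/*
 * Editorial sketch, not part of the original source: afu->room caches the
 * most recently read value of the MMIO cmd_room register and is decremented
 * once per submission, so the expensive readq_be() is only repeated when the
 * cached count runs out (or two submitters race it negative). Purely for
 * illustration:
 *
 *	MMIO read returns 4  ->  afu->room = 4
 *	sends observe 3, 2, 1 and go straight to write_ioarrin
 *	the send that decrements the cache to 0 refreshes it from cmd_room
 *
 * i.e. roughly one MMIO read is paid per batch of available room rather than
 * per command.
 */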
381
382/**
 383 * wait_resp() - waits for a response or timeout on a sent AFU command
384 * @afu: AFU associated with the host.
385 * @cmd: AFU command that was sent.
386 */
387static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
388{
389 ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
390
391 timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
392 if (!timeout)
393 context_reset(cmd);
394
395 if (unlikely(cmd->sa.ioasc != 0))
396 pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
397 "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
398 cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
399 cmd->sa.rc.fc_rc);
400}
401
402/**
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500403 * send_tmf() - sends a Task Management Function (TMF)
404 * @afu: AFU to checkout from.
405 * @scp: SCSI command from stack.
406 * @tmfcmd: TMF command to send.
407 *
408 * Return:
409 * 0 on success
410 * SCSI_MLQUEUE_HOST_BUSY when host is busy
411 */
412static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
413{
414 struct afu_cmd *cmd;
415
416 u32 port_sel = scp->device->channel + 1;
417 short lflag = 0;
418 struct Scsi_Host *host = scp->device->host;
419 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500420 struct device *dev = &cfg->dev->dev;
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500421 ulong lock_flags;
422 int rc = 0;
423
Matthew R. Ochs15305512015-10-21 15:12:10 -0500424 cmd = cmd_checkout(afu);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500425 if (unlikely(!cmd)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500426 dev_err(dev, "%s: could not get a free command\n", __func__);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500427 rc = SCSI_MLQUEUE_HOST_BUSY;
428 goto out;
429 }
430
431 /* If a Task Management Function is active, do not send one more.
432 */
433 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
434 if (cfg->tmf_active)
435 wait_event_interruptible_locked_irq(cfg->tmf_waitq,
436 !cfg->tmf_active);
437 cfg->tmf_active = true;
438 cmd->cmd_tmf = true;
439 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
440
441 cmd->rcb.ctx_id = afu->ctx_hndl;
442 cmd->rcb.port_sel = port_sel;
443 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
444
445 lflag = SISL_REQ_FLAGS_TMF_CMD;
446
447 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
448 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
449
450 /* Stash the scp in the reserved field, for reuse during interrupt */
451 cmd->rcb.scp = scp;
452
453 /* Copy the CDB from the cmd passed in */
454 memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
455
456 /* Send the command */
Matthew R. Ochs15305512015-10-21 15:12:10 -0500457 rc = send_cmd(afu, cmd);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500458 if (unlikely(rc)) {
Matthew R. Ochs15305512015-10-21 15:12:10 -0500459 cmd_checkin(cmd);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500460 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
461 cfg->tmf_active = false;
462 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
463 goto out;
464 }
465
466 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
467 wait_event_interruptible_locked_irq(cfg->tmf_waitq, !cfg->tmf_active);
468 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
469out:
470 return rc;
471}
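
/*
 * Hypothetical usage sketch, editorial only (the handler and TMF constant
 * below are assumptions, not taken from this file): a SCSI error-handler
 * callback would typically wrap send_tmf() along these lines, with
 * TMF_LUN_RESET standing in for whichever TMF opcode main.h defines:
 *
 *	static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
 *	{
 *		struct cxlflash_cfg *cfg =
 *			(struct cxlflash_cfg *)scp->device->host->hostdata;
 *		struct afu *afu = cfg->afu;
 *
 *		return send_tmf(afu, scp, TMF_LUN_RESET) ? FAILED : SUCCESS;
 *	}
 */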
472
473/**
474 * cxlflash_driver_info() - information handler for this host driver
475 * @host: SCSI host associated with device.
476 *
477 * Return: A string describing the device.
478 */
479static const char *cxlflash_driver_info(struct Scsi_Host *host)
480{
481 return CXLFLASH_ADAPTER_NAME;
482}
483
484/**
485 * cxlflash_queuecommand() - sends a mid-layer request
486 * @host: SCSI host associated with device.
487 * @scp: SCSI command to send.
488 *
489 * Return:
490 * 0 on success
491 * SCSI_MLQUEUE_HOST_BUSY when host is busy
492 */
493static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
494{
495 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
496 struct afu *afu = cfg->afu;
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500497 struct device *dev = &cfg->dev->dev;
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500498 struct afu_cmd *cmd;
499 u32 port_sel = scp->device->channel + 1;
500 int nseg, i, ncount;
501 struct scatterlist *sg;
502 ulong lock_flags;
503 short lflag = 0;
504 int rc = 0;
505
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500506 dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
507 "cdb=(%08X-%08X-%08X-%08X)\n",
508 __func__, scp, host->host_no, scp->device->channel,
509 scp->device->id, scp->device->lun,
510 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
511 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
512 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
513 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500514
515 /* If a Task Management Function is active, wait for it to complete
516 * before continuing with regular commands.
517 */
518 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
519 if (cfg->tmf_active) {
520 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
521 rc = SCSI_MLQUEUE_HOST_BUSY;
522 goto out;
523 }
524 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
525
Matthew R. Ochs5cdac812015-08-13 21:47:34 -0500526 switch (cfg->state) {
Matthew R. Ochs439e85c2015-10-21 15:12:00 -0500527 case STATE_RESET:
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500528 dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
Matthew R. Ochs5cdac812015-08-13 21:47:34 -0500529 rc = SCSI_MLQUEUE_HOST_BUSY;
530 goto out;
531 case STATE_FAILTERM:
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500532 dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
Matthew R. Ochs5cdac812015-08-13 21:47:34 -0500533 scp->result = (DID_NO_CONNECT << 16);
534 scp->scsi_done(scp);
535 rc = 0;
536 goto out;
537 default:
538 break;
539 }
540
Matthew R. Ochs15305512015-10-21 15:12:10 -0500541 cmd = cmd_checkout(afu);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500542 if (unlikely(!cmd)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500543 dev_err(dev, "%s: could not get a free command\n", __func__);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500544 rc = SCSI_MLQUEUE_HOST_BUSY;
545 goto out;
546 }
547
548 cmd->rcb.ctx_id = afu->ctx_hndl;
549 cmd->rcb.port_sel = port_sel;
550 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
551
552 if (scp->sc_data_direction == DMA_TO_DEVICE)
553 lflag = SISL_REQ_FLAGS_HOST_WRITE;
554 else
555 lflag = SISL_REQ_FLAGS_HOST_READ;
556
557 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
558 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
559
560 /* Stash the scp in the reserved field, for reuse during interrupt */
561 cmd->rcb.scp = scp;
562
563 nseg = scsi_dma_map(scp);
564 if (unlikely(nseg < 0)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500565 dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500566 __func__, nseg);
567 rc = SCSI_MLQUEUE_HOST_BUSY;
568 goto out;
569 }
570
571 ncount = scsi_sg_count(scp);
572 scsi_for_each_sg(scp, sg, ncount, i) {
573 cmd->rcb.data_len = sg_dma_len(sg);
574 cmd->rcb.data_ea = sg_dma_address(sg);
575 }
576
577 /* Copy the CDB from the scsi_cmnd passed in */
578 memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
579
580 /* Send the command */
Matthew R. Ochs15305512015-10-21 15:12:10 -0500581 rc = send_cmd(afu, cmd);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500582 if (unlikely(rc)) {
Matthew R. Ochs15305512015-10-21 15:12:10 -0500583 cmd_checkin(cmd);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500584 scsi_dma_unmap(scp);
585 }
586
587out:
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500588 pr_devel("%s: returning rc=%d\n", __func__, rc);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500589 return rc;
590}
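
/*
 * Editorial note, not part of the original source: the scsi_for_each_sg()
 * loop above overwrites rcb.data_len/data_ea on every iteration, so only the
 * final scatter-gather element survives. That is only safe if the SCSI host
 * template restricts requests to a single SG entry (e.g. an sg_tablesize of
 * 1); that assumption is inferred here rather than visible in this excerpt.
 */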
591
592/**
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500593 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 594 * @cfg: Internal structure associated with the host.
595 */
596static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
597{
598 struct pci_dev *pdev = cfg->dev;
599
600 if (pci_channel_offline(pdev))
Matthew R. Ochs439e85c2015-10-21 15:12:00 -0500601 wait_event_timeout(cfg->reset_waitq,
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500602 !pci_channel_offline(pdev),
603 CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
604}
605
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500606/**
607 * free_mem() - free memory associated with the AFU
 608 * @cfg: Internal structure associated with the host.
609 */
610static void free_mem(struct cxlflash_cfg *cfg)
611{
612 int i;
613 char *buf = NULL;
614 struct afu *afu = cfg->afu;
615
616 if (cfg->afu) {
617 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
618 buf = afu->cmd[i].buf;
619 if (!((u64)buf & (PAGE_SIZE - 1)))
620 free_page((ulong)buf);
621 }
622
623 free_pages((ulong)afu, get_order(sizeof(struct afu)));
624 cfg->afu = NULL;
625 }
626}
627
628/**
629 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 630 * @cfg: Internal structure associated with the host.
631 *
632 * Safe to call with AFU in a partially allocated/initialized state.
633 */
634static void stop_afu(struct cxlflash_cfg *cfg)
635{
636 int i;
637 struct afu *afu = cfg->afu;
638
639 if (likely(afu)) {
640 for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
641 complete(&afu->cmd[i].cevent);
642
643 if (likely(afu->afu_map)) {
644 cxl_psa_unmap((void *)afu->afu_map);
645 afu->afu_map = NULL;
646 }
647 }
648}
649
650/**
651 * term_mc() - terminates the master context
 652 * @cfg: Internal structure associated with the host.
653 * @level: Depth of allocation, where to begin waterfall tear down.
654 *
655 * Safe to call with AFU/MC in partially allocated/initialized state.
656 */
657static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
658{
659 int rc = 0;
660 struct afu *afu = cfg->afu;
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500661 struct device *dev = &cfg->dev->dev;
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500662
663 if (!afu || !cfg->mcctx) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500664 dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500665 __func__);
666 return;
667 }
668
669 switch (level) {
670 case UNDO_START:
671 rc = cxl_stop_context(cfg->mcctx);
672 BUG_ON(rc);
673 case UNMAP_THREE:
674 cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
675 case UNMAP_TWO:
676 cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
677 case UNMAP_ONE:
678 cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
679 case FREE_IRQ:
680 cxl_free_afu_irqs(cfg->mcctx);
681 case RELEASE_CONTEXT:
682 cfg->mcctx = NULL;
683 }
684}
685
686/**
687 * term_afu() - terminates the AFU
 688 * @cfg: Internal structure associated with the host.
689 *
690 * Safe to call with AFU/MC in partially allocated/initialized state.
691 */
692static void term_afu(struct cxlflash_cfg *cfg)
693{
694 term_mc(cfg, UNDO_START);
695
696 if (cfg->afu)
697 stop_afu(cfg);
698
699 pr_debug("%s: returning\n", __func__);
700}
701
702/**
703 * cxlflash_remove() - PCI entry point to tear down host
704 * @pdev: PCI device associated with the host.
705 *
706 * Safe to use as a cleanup in partially allocated/initialized state.
707 */
708static void cxlflash_remove(struct pci_dev *pdev)
709{
710 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
711 ulong lock_flags;
712
713 /* If a Task Management Function is active, wait for it to complete
714 * before continuing with remove.
715 */
716 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
717 if (cfg->tmf_active)
718 wait_event_interruptible_locked_irq(cfg->tmf_waitq,
719 !cfg->tmf_active);
720 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
721
Matthew R. Ochs5cdac812015-08-13 21:47:34 -0500722 cfg->state = STATE_FAILTERM;
Matthew R. Ochs65be2c72015-08-13 21:47:43 -0500723 cxlflash_stop_term_user_contexts(cfg);
Matthew R. Ochs5cdac812015-08-13 21:47:34 -0500724
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500725 switch (cfg->init_state) {
726 case INIT_STATE_SCSI:
Matthew R. Ochs65be2c72015-08-13 21:47:43 -0500727 cxlflash_term_local_luns(cfg);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500728 scsi_remove_host(cfg->host);
729 scsi_host_put(cfg->host);
730 /* Fall through */
731 case INIT_STATE_AFU:
732 term_afu(cfg);
733 case INIT_STATE_PCI:
734 pci_release_regions(cfg->dev);
735 pci_disable_device(pdev);
736 case INIT_STATE_NONE:
737 flush_work(&cfg->work_q);
738 free_mem(cfg);
739 break;
740 }
741
742 pr_debug("%s: returning\n", __func__);
743}
744
745/**
746 * alloc_mem() - allocates the AFU and its command pool
 747 * @cfg: Internal structure associated with the host.
748 *
749 * A partially allocated state remains on failure.
750 *
751 * Return:
752 * 0 on success
753 * -ENOMEM on failure to allocate memory
754 */
755static int alloc_mem(struct cxlflash_cfg *cfg)
756{
757 int rc = 0;
758 int i;
759 char *buf = NULL;
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500760 struct device *dev = &cfg->dev->dev;
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500761
762 /* This allocation is about 12K, i.e. only 1 64k page
 763 * and up to 4 4k pages
764 */
765 cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
766 get_order(sizeof(struct afu)));
767 if (unlikely(!cfg->afu)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500768 dev_err(dev, "%s: cannot get %d free pages\n",
769 __func__, get_order(sizeof(struct afu)));
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500770 rc = -ENOMEM;
771 goto out;
772 }
773 cfg->afu->parent = cfg;
774 cfg->afu->afu_map = NULL;
775
776 for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
777 if (!((u64)buf & (PAGE_SIZE - 1))) {
778 buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
779 if (unlikely(!buf)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -0500780 dev_err(dev,
781 "%s: Allocate command buffers fail!\n",
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -0500782 __func__);
783 rc = -ENOMEM;
784 free_mem(cfg);
785 goto out;
786 }
787 }
788
789 cfg->afu->cmd[i].buf = buf;
790 atomic_set(&cfg->afu->cmd[i].free, 1);
791 cfg->afu->cmd[i].slot = i;
792 }
793
794out:
795 return rc;
796}
797
798/**
799 * init_pci() - initializes the host as a PCI device
 800 * @cfg: Internal structure associated with the host.
801 *
802 * Return:
803 * 0 on success
804 * -EIO on unable to communicate with device
805 * A return code from the PCI sub-routines
806 */
807static int init_pci(struct cxlflash_cfg *cfg)
808{
809 struct pci_dev *pdev = cfg->dev;
810 int rc = 0;
811
812 cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
813 rc = pci_request_regions(pdev, CXLFLASH_NAME);
814 if (rc < 0) {
815 dev_err(&pdev->dev,
816 "%s: Couldn't register memory range of registers\n",
817 __func__);
818 goto out;
819 }
820
821 rc = pci_enable_device(pdev);
822 if (rc || pci_channel_offline(pdev)) {
823 if (pci_channel_offline(pdev)) {
824 cxlflash_wait_for_pci_err_recovery(cfg);
825 rc = pci_enable_device(pdev);
826 }
827
828 if (rc) {
829 dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
830 __func__);
831 cxlflash_wait_for_pci_err_recovery(cfg);
832 goto out_release_regions;
833 }
834 }
835
836 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
837 if (rc < 0) {
838 dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
839 __func__);
840 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
841 }
842
843 if (rc < 0) {
844 dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
845 __func__);
846 goto out_disable;
847 }
848
849 pci_set_master(pdev);
850
851 if (pci_channel_offline(pdev)) {
852 cxlflash_wait_for_pci_err_recovery(cfg);
853 if (pci_channel_offline(pdev)) {
854 rc = -EIO;
855 goto out_msi_disable;
856 }
857 }
858
859 rc = pci_save_state(pdev);
860
861 if (rc != PCIBIOS_SUCCESSFUL) {
862 dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
863 __func__);
864 rc = -EIO;
865 goto cleanup_nolog;
866 }
867
868out:
869 pr_debug("%s: returning rc=%d\n", __func__, rc);
870 return rc;
871
872cleanup_nolog:
873out_msi_disable:
874 cxlflash_wait_for_pci_err_recovery(cfg);
875out_disable:
876 pci_disable_device(pdev);
877out_release_regions:
878 pci_release_regions(pdev);
879 goto out;
880
881}
882
883/**
884 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 885 * @cfg: Internal structure associated with the host.
886 *
887 * Return:
888 * 0 on success
889 * A return code from adding the host
890 */
891static int init_scsi(struct cxlflash_cfg *cfg)
892{
893 struct pci_dev *pdev = cfg->dev;
894 int rc = 0;
895
896 rc = scsi_add_host(cfg->host, &pdev->dev);
897 if (rc) {
898 dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
899 __func__, rc);
900 goto out;
901 }
902
903 scsi_scan_host(cfg->host);
904
905out:
906 pr_debug("%s: returning rc=%d\n", __func__, rc);
907 return rc;
908}
909
910/**
911 * set_port_online() - transitions the specified host FC port to online state
912 * @fc_regs: Top of MMIO region defined for specified port.
913 *
914 * The provided MMIO region must be mapped prior to call. Online state means
915 * that the FC link layer has synced, completed the handshaking process, and
916 * is ready for login to start.
917 */
918static void set_port_online(u64 *fc_regs)
919{
920 u64 cmdcfg;
921
922 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
923 cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
924 cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
925 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
926}
927
928/**
929 * set_port_offline() - transitions the specified host FC port to offline state
930 * @fc_regs: Top of MMIO region defined for specified port.
931 *
932 * The provided MMIO region must be mapped prior to call.
933 */
934static void set_port_offline(u64 *fc_regs)
935{
936 u64 cmdcfg;
937
938 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
939 cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
940 cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
941 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
942}
943
944/**
 945 * wait_port_online() - waits for the specified host FC port to come online
946 * @fc_regs: Top of MMIO region defined for specified port.
947 * @delay_us: Number of microseconds to delay between reading port status.
948 * @nretry: Number of cycles to retry reading port status.
949 *
950 * The provided MMIO region must be mapped prior to call. This will timeout
951 * when the cable is not plugged in.
952 *
953 * Return:
954 * TRUE (1) when the specified port is online
955 * FALSE (0) when the specified port fails to come online after timeout
956 * -EINVAL when @delay_us is less than 1000
957 */
958static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
959{
960 u64 status;
961
962 if (delay_us < 1000) {
963 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
964 return -EINVAL;
965 }
966
967 do {
968 msleep(delay_us / 1000);
969 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
970 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
971 nretry--);
972
973 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
974}
975
976/**
 977 * wait_port_offline() - waits for the specified host FC port to go offline
978 * @fc_regs: Top of MMIO region defined for specified port.
979 * @delay_us: Number of microseconds to delay between reading port status.
980 * @nretry: Number of cycles to retry reading port status.
981 *
982 * The provided MMIO region must be mapped prior to call.
983 *
984 * Return:
985 * TRUE (1) when the specified port is offline
986 * FALSE (0) when the specified port fails to go offline after timeout
987 * -EINVAL when @delay_us is less than 1000
988 */
989static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
990{
991 u64 status;
992
993 if (delay_us < 1000) {
994 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
995 return -EINVAL;
996 }
997
998 do {
999 msleep(delay_us / 1000);
1000 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1001 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1002 nretry--);
1003
1004 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1005}
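
/*
 * Editorial example, not part of the original source: both wait routines
 * above bound their wait at roughly delay_us * nretry. Purely for
 * illustration, a caller passing delay_us = 1000 and nretry = 100 polls the
 * status register about once per millisecond and gives up after ~100 ms;
 * the real values come from FC_PORT_STATUS_RETRY_INTERVAL_US and
 * FC_PORT_STATUS_RETRY_CNT in the driver headers.
 */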
1006
1007/**
1008 * afu_set_wwpn() - configures the WWPN for the specified host FC port
1009 * @afu: AFU associated with the host that owns the specified FC port.
1010 * @port: Port number being configured.
1011 * @fc_regs: Top of MMIO region defined for specified port.
1012 * @wwpn: The world-wide-port-number previously discovered for port.
1013 *
1014 * The provided MMIO region must be mapped prior to call. As part of the
1015 * sequence to configure the WWPN, the port is toggled offline and then back
1016 * online. This toggling action can cause this routine to delay up to a few
1017 * seconds. When configured to use the internal LUN feature of the AFU, a
1018 * failure to come online is overridden.
1019 *
1020 * Return:
1021 * 0 when the WWPN is successfully written and the port comes back online
1022 * -1 when the port fails to go offline or come back up online
1023 */
1024static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
1025{
1026 int ret = 0;
1027
1028 set_port_offline(fc_regs);
1029
1030 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1031 FC_PORT_STATUS_RETRY_CNT)) {
1032 pr_debug("%s: wait on port %d to go offline timed out\n",
1033 __func__, port);
1034 ret = -1; /* but continue on to leave the port back online */
1035 }
1036
1037 if (ret == 0)
1038 writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1039
1040 set_port_online(fc_regs);
1041
1042 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1043 FC_PORT_STATUS_RETRY_CNT)) {
1044 pr_debug("%s: wait on port %d to go online timed out\n",
1045 __func__, port);
1046 ret = -1;
1047
1048 /*
1049 * Override for internal lun!!!
1050 */
1051 if (afu->internal_lun) {
1052 pr_debug("%s: Overriding port %d online timeout!!!\n",
1053 __func__, port);
1054 ret = 0;
1055 }
1056 }
1057
1058 pr_debug("%s: returning rc=%d\n", __func__, ret);
1059
1060 return ret;
1061}
1062
1063/**
1064 * afu_link_reset() - resets the specified host FC port
1065 * @afu: AFU associated with the host that owns the specified FC port.
1066 * @port: Port number being configured.
1067 * @fc_regs: Top of MMIO region defined for specified port.
1068 *
1069 * The provided MMIO region must be mapped prior to call. The sequence to
1070 * reset the port involves toggling it offline and then back online. This
1071 * action can cause this routine to delay up to a few seconds. An effort
 1072 * is made to maintain link with the device by switching the host to use
 1073 * the alternate port exclusively while the reset takes place. A timeout
 1074 * waiting for the port to go offline or come back online is only logged.
1075 */
1076static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
1077{
1078 u64 port_sel;
1079
1080 /* first switch the AFU to the other links, if any */
1081 port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
Dan Carpenter4da74db2015-08-18 11:57:43 +03001082 port_sel &= ~(1ULL << port);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001083 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1084 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1085
1086 set_port_offline(fc_regs);
1087 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1088 FC_PORT_STATUS_RETRY_CNT))
1089 pr_err("%s: wait on port %d to go offline timed out\n",
1090 __func__, port);
1091
1092 set_port_online(fc_regs);
1093 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1094 FC_PORT_STATUS_RETRY_CNT))
1095 pr_err("%s: wait on port %d to go online timed out\n",
1096 __func__, port);
1097
1098 /* switch back to include this port */
Dan Carpenter4da74db2015-08-18 11:57:43 +03001099 port_sel |= (1ULL << port);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001100 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1101 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1102
1103 pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
1104}
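
/*
 * Editorial note, not part of the original source: afu_port_sel is treated
 * as a bitmask with one bit per FC port, so for port 1 the reset sequence
 * above is effectively
 *
 *	port_sel &= ~(1ULL << 1);	only port 0 carries traffic
 *	... toggle port 1 offline/online ...
 *	port_sel |= (1ULL << 1);	both ports selected again
 *
 * with a global AFU sync (AFU_GSYNC) issued after each change so the AFU
 * picks up the new port selection before and after the reset.
 */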
1105
1106/*
1107 * Asynchronous interrupt information table
1108 */
1109static const struct asyc_intr_info ainfo[] = {
1110 {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
1111 {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
1112 {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
1113 {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
1114 {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
1115 {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, 0},
1116 {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
1117 {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
1118 {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
1119 {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
1120 {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
1121 {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
1122 {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
1123 {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, 0},
1124 {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
1125 {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
1126 {0x0, "", 0, 0} /* terminator */
1127};
1128
1129/**
1130 * find_ainfo() - locates and returns asynchronous interrupt information
1131 * @status: Status code set by AFU on error.
1132 *
1133 * Return: The located information or NULL when the status code is invalid.
1134 */
1135static const struct asyc_intr_info *find_ainfo(u64 status)
1136{
1137 const struct asyc_intr_info *info;
1138
1139 for (info = &ainfo[0]; info->status; info++)
1140 if (info->status == status)
1141 return info;
1142
1143 return NULL;
1144}
1145
1146/**
1147 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1148 * @afu: AFU associated with the host.
1149 */
1150static void afu_err_intr_init(struct afu *afu)
1151{
1152 int i;
1153 u64 reg;
1154
1155 /* global async interrupts: AFU clears afu_ctrl on context exit
1156 * if async interrupts were sent to that context. This prevents
 1157 * the AFU from sending further async interrupts when there is
 1158 * nobody to receive them.
1160 */
1161
1162 /* mask all */
1163 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1164 /* set LISN# to send and point to master context */
1165 reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1166
1167 if (afu->internal_lun)
1168 reg |= 1; /* Bit 63 indicates local lun */
1169 writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1170 /* clear all */
1171 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1172 /* unmask bits that are of interest */
1173 /* note: afu can send an interrupt after this step */
1174 writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1175 /* clear again in case a bit came on after previous clear but before */
1176 /* unmask */
1177 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1178
1179 /* Clear/Set internal lun bits */
1180 reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1181 reg &= SISL_FC_INTERNAL_MASK;
1182 if (afu->internal_lun)
1183 reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1184 writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1185
1186 /* now clear FC errors */
1187 for (i = 0; i < NUM_FC_PORTS; i++) {
1188 writeq_be(0xFFFFFFFFU,
1189 &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
1190 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
1191 }
1192
1193 /* sync interrupts for master's IOARRIN write */
1194 /* note that unlike asyncs, there can be no pending sync interrupts */
1195 /* at this time (this is a fresh context and master has not written */
1196 /* IOARRIN yet), so there is nothing to clear. */
1197
1198 /* set LISN#, it is always sent to the context that wrote IOARRIN */
1199 writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
1200 writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
1201}
1202
1203/**
1204 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1205 * @irq: Interrupt number.
1206 * @data: Private data provided at interrupt registration, the AFU.
1207 *
1208 * Return: Always return IRQ_HANDLED.
1209 */
1210static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1211{
1212 struct afu *afu = (struct afu *)data;
1213 u64 reg;
1214 u64 reg_unmasked;
1215
1216 reg = readq_be(&afu->host_map->intr_status);
1217 reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1218
1219 if (reg_unmasked == 0UL) {
1220 pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
1221 __func__, (u64)afu, reg);
1222 goto cxlflash_sync_err_irq_exit;
1223 }
1224
1225 pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
1226 __func__, (u64)afu, reg);
1227
1228 writeq_be(reg_unmasked, &afu->host_map->intr_clear);
1229
1230cxlflash_sync_err_irq_exit:
1231 pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
1232 return IRQ_HANDLED;
1233}
1234
1235/**
1236 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1237 * @irq: Interrupt number.
1238 * @data: Private data provided at interrupt registration, the AFU.
1239 *
1240 * Return: Always return IRQ_HANDLED.
1241 */
1242static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1243{
1244 struct afu *afu = (struct afu *)data;
1245 struct afu_cmd *cmd;
1246 bool toggle = afu->toggle;
1247 u64 entry,
1248 *hrrq_start = afu->hrrq_start,
1249 *hrrq_end = afu->hrrq_end,
1250 *hrrq_curr = afu->hrrq_curr;
1251
1252 /* Process however many RRQ entries that are ready */
1253 while (true) {
1254 entry = *hrrq_curr;
1255
1256 if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1257 break;
1258
1259 cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
1260 cmd_complete(cmd);
1261
1262 /* Advance to next entry or wrap and flip the toggle bit */
1263 if (hrrq_curr < hrrq_end)
1264 hrrq_curr++;
1265 else {
1266 hrrq_curr = hrrq_start;
1267 toggle ^= SISL_RESP_HANDLE_T_BIT;
1268 }
1269 }
1270
1271 afu->hrrq_curr = hrrq_curr;
1272 afu->toggle = toggle;
1273
1274 return IRQ_HANDLED;
1275}
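
/*
 * Editorial sketch, not part of the original source: the RRQ is a ring of
 * 64-bit entries in which the low-order SISL_RESP_HANDLE_T_BIT acts as a
 * generation ("toggle") flag instead of a producer index. Illustratively:
 *
 *	pass 1: AFU posts entries with T = 1; the host consumes while the
 *	        entry's T bit matches afu->toggle (initially 1)
 *	wrap:   the host flips afu->toggle to 0 when hrrq_curr wraps
 *	pass 2: AFU posts entries with T = 0, and so on
 *
 * so a stale entry left over from the previous pass never matches and the
 * handler stops at the first unconsumed slot.
 */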
1276
1277/**
1278 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1279 * @irq: Interrupt number.
1280 * @data: Private data provided at interrupt registration, the AFU.
1281 *
1282 * Return: Always return IRQ_HANDLED.
1283 */
1284static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1285{
1286 struct afu *afu = (struct afu *)data;
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001287 struct cxlflash_cfg *cfg = afu->parent;
1288 struct device *dev = &cfg->dev->dev;
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001289 u64 reg_unmasked;
1290 const struct asyc_intr_info *info;
1291 struct sisl_global_map *global = &afu->afu_map->global;
1292 u64 reg;
1293 u8 port;
1294 int i;
1295
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001296 reg = readq_be(&global->regs.aintr_status);
1297 reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1298
1299 if (reg_unmasked == 0) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001300 dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
1301 __func__, reg);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001302 goto out;
1303 }
1304
1305 /* it is OK to clear AFU status before FC_ERROR */
1306 writeq_be(reg_unmasked, &global->regs.aintr_clear);
1307
1308 /* check each bit that is on */
1309 for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
1310 info = find_ainfo(1ULL << i);
 1311 if (((reg_unmasked & 0x1) == 0) || !info)
1312 continue;
1313
1314 port = info->port;
1315
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001316 dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
1317 __func__, port, info->desc,
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001318 readq_be(&global->fc_regs[port][FC_STATUS / 8]));
1319
1320 /*
1321 * do link reset first, some OTHER errors will set FC_ERROR
1322 * again if cleared before or w/o a reset
1323 */
1324 if (info->action & LINK_RESET) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001325 dev_err(dev, "%s: FC Port %d: resetting link\n",
1326 __func__, port);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001327 cfg->lr_state = LINK_RESET_REQUIRED;
1328 cfg->lr_port = port;
1329 schedule_work(&cfg->work_q);
1330 }
1331
1332 if (info->action & CLR_FC_ERROR) {
1333 reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
1334
1335 /*
1336 * since all errors are unmasked, FC_ERROR and FC_ERRCAP
1337 * should be the same and tracing one is sufficient.
1338 */
1339
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001340 dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
1341 __func__, port, reg);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001342
1343 writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
1344 writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
1345 }
1346 }
1347
1348out:
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001349 dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001350 return IRQ_HANDLED;
1351}
1352
1353/**
1354 * start_context() - starts the master context
 1355 * @cfg: Internal structure associated with the host.
1356 *
1357 * Return: A success or failure value from CXL services.
1358 */
1359static int start_context(struct cxlflash_cfg *cfg)
1360{
1361 int rc = 0;
1362
1363 rc = cxl_start_context(cfg->mcctx,
1364 cfg->afu->work.work_element_descriptor,
1365 NULL);
1366
1367 pr_debug("%s: returning rc=%d\n", __func__, rc);
1368 return rc;
1369}
1370
1371/**
1372 * read_vpd() - obtains the WWPNs from VPD
 1373 * @cfg: Internal structure associated with the host.
1374 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
1375 *
1376 * Return:
1377 * 0 on success
1378 * -ENODEV when VPD or WWPN keywords not found
1379 */
1380static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1381{
1382 struct pci_dev *dev = cfg->parent_dev;
1383 int rc = 0;
1384 int ro_start, ro_size, i, j, k;
1385 ssize_t vpd_size;
1386 char vpd_data[CXLFLASH_VPD_LEN];
1387 char tmp_buf[WWPN_BUF_LEN] = { 0 };
1388 char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
1389
1390 /* Get the VPD data from the device */
1391 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
1392 if (unlikely(vpd_size <= 0)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001393 dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001394 __func__, vpd_size);
1395 rc = -ENODEV;
1396 goto out;
1397 }
1398
1399 /* Get the read only section offset */
1400 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1401 PCI_VPD_LRDT_RO_DATA);
1402 if (unlikely(ro_start < 0)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001403 dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
1404 __func__);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001405 rc = -ENODEV;
1406 goto out;
1407 }
1408
 1409 /* Get the read only section size, cap it if it extends beyond the read VPD */
1410 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1411 j = ro_size;
1412 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1413 if (unlikely((i + j) > vpd_size)) {
1414 pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
1415 __func__, (i + j), vpd_size);
1416 ro_size = vpd_size - i;
1417 }
1418
1419 /*
1420 * Find the offset of the WWPN tag within the read only
1421 * VPD data and validate the found field (partials are
1422 * no good to us). Convert the ASCII data to an integer
1423 * value. Note that we must copy to a temporary buffer
1424 * because the conversion service requires that the ASCII
1425 * string be terminated.
1426 */
1427 for (k = 0; k < NUM_FC_PORTS; k++) {
1428 j = ro_size;
1429 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1430
1431 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1432 if (unlikely(i < 0)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001433 dev_err(&dev->dev, "%s: Port %d WWPN not found "
1434 "in VPD\n", __func__, k);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001435 rc = -ENODEV;
1436 goto out;
1437 }
1438
1439 j = pci_vpd_info_field_size(&vpd_data[i]);
1440 i += PCI_VPD_INFO_FLD_HDR_SIZE;
1441 if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001442 dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
1443 "VPD corrupt\n",
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001444 __func__, k);
1445 rc = -ENODEV;
1446 goto out;
1447 }
1448
1449 memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1450 rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1451 if (unlikely(rc)) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001452 dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
1453 "to integer\n", __func__, k);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001454 rc = -ENODEV;
1455 goto out;
1456 }
1457 }
1458
1459out:
1460 pr_debug("%s: returning rc=%d\n", __func__, rc);
1461 return rc;
1462}
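
/*
 * Editorial illustration, not part of the original source: the walk above
 * expects the WWPNs inside the large-resource read-only VPD section, keyed
 * by the "V5" (port 0) and "V6" (port 1) keywords, each holding WWPN_LEN
 * (16) ASCII hex characters. A made-up example of what gets parsed:
 *
 *	"V5", len 16, "500507680B214FC0"  ->  wwpn[0] = 0x500507680B214FC0
 *	"V6", len 16, "500507680B214FC1"  ->  wwpn[1] = 0x500507680B214FC1
 *
 * kstrtoul() is called with WWPN_LEN (16) as the base, which is why the hex
 * string needs no "0x" prefix; the sample values are invented.
 */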
1463
1464/**
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001465 * init_pcr() - initialize the provisioning and control registers
 1466 * @cfg: Internal structure associated with the host.
1467 *
1468 * Also sets up fast access to the mapped registers and initializes AFU
1469 * command fields that never change.
1470 */
Matthew R. Ochs15305512015-10-21 15:12:10 -05001471static void init_pcr(struct cxlflash_cfg *cfg)
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001472{
1473 struct afu *afu = cfg->afu;
1474 struct sisl_ctrl_map *ctrl_map;
1475 int i;
1476
1477 for (i = 0; i < MAX_CONTEXT; i++) {
1478 ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1479 /* disrupt any clients that could be running */
1480 /* e. g. clients that survived a master restart */
1481 writeq_be(0, &ctrl_map->rht_start);
1482 writeq_be(0, &ctrl_map->rht_cnt_id);
1483 writeq_be(0, &ctrl_map->ctx_cap);
1484 }
1485
1486 /* copy frequently used fields into afu */
1487 afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
1488 /* ctx_hndl is 16 bits in CAIA */
1489 afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
1490 afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
1491
1492 /* Program the Endian Control for the master context */
1493 writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
1494
1495 /* initialize cmd fields that never change */
1496 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1497 afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
1498 afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
1499 afu->cmd[i].rcb.rrq = 0x0;
1500 }
1501}
1502
1503/**
1504 * init_global() - initialize AFU global registers
 1505 * @cfg: Internal structure associated with the host.
1506 */
Matthew R. Ochs15305512015-10-21 15:12:10 -05001507static int init_global(struct cxlflash_cfg *cfg)
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001508{
1509 struct afu *afu = cfg->afu;
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001510 struct device *dev = &cfg->dev->dev;
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001511 u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */
1512 int i = 0, num_ports = 0;
1513 int rc = 0;
1514 u64 reg;
1515
1516 rc = read_vpd(cfg, &wwpn[0]);
1517 if (rc) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001518 dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001519 goto out;
1520 }
1521
1522 pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
1523
1524 /* set up RRQ in AFU for master issued cmds */
1525 writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
1526 writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
1527
1528 /* AFU configuration */
1529 reg = readq_be(&afu->afu_map->global.regs.afu_config);
1530 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1531 /* enable all auto retry options and control endianness */
1532 /* leave others at default: */
1533 /* CTX_CAP write protected, mbox_r does not clear on read and */
1534 /* checker on if dual afu */
1535 writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1536
1537 /* global port select: select either port */
1538 if (afu->internal_lun) {
1539 /* only use port 0 */
1540 writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1541 num_ports = NUM_FC_PORTS - 1;
1542 } else {
1543 writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
1544 num_ports = NUM_FC_PORTS;
1545 }
1546
1547 for (i = 0; i < num_ports; i++) {
1548 /* unmask all errors (but they are still masked at AFU) */
1549 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
1550 /* clear CRC error cnt & set a threshold */
1551 (void)readq_be(&afu->afu_map->global.
1552 fc_regs[i][FC_CNT_CRCERR / 8]);
1553 writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
1554 [FC_CRC_THRESH / 8]);
1555
1556 /* set WWPNs. If already programmed, wwpn[i] is 0 */
1557 if (wwpn[i] != 0 &&
1558 afu_set_wwpn(afu, i,
1559 &afu->afu_map->global.fc_regs[i][0],
1560 wwpn[i])) {
Matthew R. Ochs4392ba42015-10-21 15:13:11 -05001561 dev_err(dev, "%s: failed to set WWPN on port %d\n",
Matthew R. Ochsc21e0bb2015-06-09 17:15:52 -05001562 __func__, i);
1563 rc = -EIO;
1564 goto out;
1565 }
1566 /* Programming WWPN back to back causes additional
1567 * offline/online transitions and a PLOGI
1568 */
1569 msleep(100);
1570
1571 }
1572
1573 /* set up master's own CTX_CAP to allow real mode, host translation */
1574 /* tbls, afu cmds and read/write GSCSI cmds. */
1575 /* First, unlock ctx_cap write by reading mbox */
1576 (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */
1577 writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1578 SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1579 SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1580 &afu->ctrl_map->ctx_cap);
1581 /* init heartbeat */
1582 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1583
1584out:
1585 return rc;
1586}
1587
1588/**
1589 * start_afu() - initializes and starts the AFU
 1590 * @cfg: Internal structure associated with the host.
1591 */
1592static int start_afu(struct cxlflash_cfg *cfg)
1593{
1594 struct afu *afu = cfg->afu;
1595 struct afu_cmd *cmd;
1596
1597 int i = 0;
1598 int rc = 0;
1599
1600 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1601 cmd = &afu->cmd[i];
1602
1603 init_completion(&cmd->cevent);
1604 spin_lock_init(&cmd->slock);
1605 cmd->parent = afu;
1606 }
1607
1608 init_pcr(cfg);
1609
1610 /* initialize RRQ pointers */
1611 afu->hrrq_start = &afu->rrq_entry[0];
1612 afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
1613 afu->hrrq_curr = afu->hrrq_start;
1614 afu->toggle = 1;
1615
1616 rc = init_global(cfg);
1617
1618 pr_debug("%s: returning rc=%d\n", __func__, rc);
1619 return rc;
1620}
1621
1622/**
1623 * init_mc() - create and register as the master context
 1624 * @cfg: Internal structure associated with the host.
1625 *
1626 * Return:
1627 * 0 on success
1628 * -ENOMEM when unable to obtain a context from CXL services
1629 * A failure value from CXL services.
1630 */
1631static int init_mc(struct cxlflash_cfg *cfg)
1632{
1633 struct cxl_context *ctx;
1634 struct device *dev = &cfg->dev->dev;
1635 struct afu *afu = cfg->afu;
1636 int rc = 0;
1637 enum undo_level level;
1638
1639 ctx = cxl_get_context(cfg->dev);
1640 if (unlikely(!ctx))
1641 return -ENOMEM;
1642 cfg->mcctx = ctx;
1643
1644 /* Set it up as a master with the CXL */
1645 cxl_set_master(ctx);
1646
1647 /* During initialization reset the AFU to start from a clean slate */
1648 rc = cxl_afu_reset(cfg->mcctx);
1649 if (unlikely(rc)) {
1650 dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1651 __func__, rc);
1652 level = RELEASE_CONTEXT;
1653 goto out;
1654 }
1655
1656 rc = cxl_allocate_afu_irqs(ctx, 3);
1657 if (unlikely(rc)) {
1658 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1659 __func__, rc);
1660 level = RELEASE_CONTEXT;
1661 goto out;
1662 }
1663
1664 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
1665 "SISL_MSI_SYNC_ERROR");
1666 if (unlikely(rc <= 0)) {
1667 dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
1668 __func__);
1669 level = FREE_IRQ;
1670 goto out;
1671 }
1672
1673 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
1674 "SISL_MSI_RRQ_UPDATED");
1675 if (unlikely(rc <= 0)) {
1676 dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
1677 __func__);
1678 level = UNMAP_ONE;
1679 goto out;
1680 }
1681
1682 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
1683 "SISL_MSI_ASYNC_ERROR");
1684 if (unlikely(rc <= 0)) {
1685 dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
1686 __func__);
1687 level = UNMAP_TWO;
1688 goto out;
1689 }
1690
1691 rc = 0;
1692
1693 /* This performs the equivalent of the CXL_IOCTL_START_WORK.
1694 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1695 * element (pe) that is embedded in the context (ctx)
1696 */
1697 rc = start_context(cfg);
1698 if (unlikely(rc)) {
1699 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1700 level = UNMAP_THREE;
1701 goto out;
1702 }
1703ret:
1704 pr_debug("%s: returning rc=%d\n", __func__, rc);
1705 return rc;
1706out:
1707 term_mc(cfg, level);
1708 goto ret;
1709}
1710
1711/**
1712 * init_afu() - setup as master context and start AFU
 1713 * @cfg: Internal structure associated with the host.
1714 *
1715 * This routine is a higher level of control for configuring the
1716 * AFU on probe and reset paths.
1717 *
1718 * Return:
1719 * 0 on success
1720 * -ENOMEM when unable to map the AFU MMIO space
1721 * A failure value from internal services.
1722 */
1723static int init_afu(struct cxlflash_cfg *cfg)
1724{
1725 u64 reg;
1726 int rc = 0;
1727 struct afu *afu = cfg->afu;
1728 struct device *dev = &cfg->dev->dev;
1729
1730 cxl_perst_reloads_same_image(cfg->cxl_afu, true);
1731
1732 rc = init_mc(cfg);
1733 if (rc) {
1734 dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
1735 __func__, rc);
1736 goto err1;
1737 }
1738
1739 /* Map the entire MMIO space of the AFU.
1740 */
1741 afu->afu_map = cxl_psa_map(cfg->mcctx);
1742 if (!afu->afu_map) {
1743 rc = -ENOMEM;
1744 term_mc(cfg, UNDO_START);
1745 dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
1746 goto err1;
1747 }
1748
1749 /* don't byte reverse on reading afu_version, else the string form */
1750 /* will be backwards */
1751 reg = afu->afu_map->global.regs.afu_version;
1752 memcpy(afu->version, &reg, 8);
1753 afu->interface_version =
1754 readq_be(&afu->afu_map->global.regs.interface_version);
1755 pr_debug("%s: afu version %s, interface version 0x%llX\n",
1756 __func__, afu->version, afu->interface_version);
1757
1758 rc = start_afu(cfg);
1759 if (rc) {
1760 dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
1761 __func__, rc);
1762 term_mc(cfg, UNDO_START);
1763 cxl_psa_unmap((void *)afu->afu_map);
1764 afu->afu_map = NULL;
1765 goto err1;
1766 }
1767
1768 afu_err_intr_init(cfg->afu);
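 /*
  * Prime the cached command-room count from MMIO. The send path consumes
  * from this cached value so it can usually avoid an MMIO read per command;
  * cxlflash_worker_thread() refreshes it when it is exhausted.
  */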
1769 atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
1770
1771 /* Restore the LUN mappings */
1772 cxlflash_restore_luntable(cfg);
1773err1:
1774 pr_debug("%s: returning rc=%d\n", __func__, rc);
1775 return rc;
1776}
1777
1778/**
1779 * cxlflash_afu_sync() - builds and sends an AFU sync command
1780 * @afu: AFU associated with the host.
1781 * @ctx_hndl_u: Identifies context requesting sync.
1782 * @res_hndl_u: Identifies resource requesting sync.
1783 * @mode: Type of sync to issue (lightweight, heavyweight, global).
1784 *
1785 * The AFU can only take 1 sync command at a time. This routine enforces this
1786 * limitation by using a mutex to provide exclusive access to the AFU during
1787 * the sync. This design point requires that calling threads not be in interrupt
1788 * context, due to the possibility of sleeping during concurrent sync operations.
1789 *
1790 * AFU sync operations are only necessary and allowed when the device is
1791 * operating normally. When not operating normally, sync requests can occur as
1792 * part of cleaning up resources associated with an adapter prior to removal.
1793 * In this scenario, these requests are simply ignored (safe due to the AFU
1794 * going away).
1795 *
1796 * Return:
1797 * 0 on success
1798 * -1 on failure
1799 */
1800int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
1801 res_hndl_t res_hndl_u, u8 mode)
1802{
1803 struct cxlflash_cfg *cfg = afu->parent;
1804 struct device *dev = &cfg->dev->dev;
1805 struct afu_cmd *cmd = NULL;
1806 int rc = 0;
1807 int retry_cnt = 0;
1808 static DEFINE_MUTEX(sync_active);
1809
1810 if (cfg->state != STATE_NORMAL) {
1811 pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
1812 return 0;
1813 }
1814
1815 mutex_lock(&sync_active);
1816retry:
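 /*
  * AFU commands are a limited resource; if the pool is momentarily empty,
  * back off with a linearly increasing delay and retry up to MC_RETRY_CNT
  * times before giving up.
  */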
1817 cmd = cmd_checkout(afu);
1818 if (unlikely(!cmd)) {
1819 retry_cnt++;
1820 udelay(1000 * retry_cnt);
1821 if (retry_cnt < MC_RETRY_CNT)
1822 goto retry;
1823 dev_err(dev, "%s: could not get a free command\n", __func__);
1824 rc = -1;
1825 goto out;
1826 }
1827
1828 pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
1829
1830 memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
1831
1832 cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
1833 cmd->rcb.port_sel = 0x0; /* NA */
1834 cmd->rcb.lun_id = 0x0; /* NA */
1835 cmd->rcb.data_len = 0x0;
1836 cmd->rcb.data_ea = 0x0;
1837 cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
1838
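 /*
  * AFU sync CDB layout as built below: byte 0 is the AFU sync opcode
  * (0xC0), byte 1 is the sync mode, bytes 2-3 carry the context handle
  * and bytes 4-7 the resource handle (both byte-swapped from host order).
  */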
1839 cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
1840 cmd->rcb.cdb[1] = mode;
1841
1842 /* The cdb is aligned, no unaligned accessors required */
1843 *((u16 *)&cmd->rcb.cdb[2]) = swab16(ctx_hndl_u);
1844 *((u32 *)&cmd->rcb.cdb[4]) = swab32(res_hndl_u);
1845
1846 rc = send_cmd(afu, cmd);
1847 if (unlikely(rc))
1848 goto out;
1849
1850 wait_resp(afu, cmd);
1851
1852 /* B_ERROR is set on timeout */
1853 if (unlikely((cmd->sa.ioasc != 0) ||
1854 (cmd->sa.host_use_b[0] & B_ERROR)))
1855 rc = -1;
1856out:
1857 mutex_unlock(&sync_active);
1858 if (cmd)
1859 cmd_checkin(cmd);
1860 pr_debug("%s: returning rc=%d\n", __func__, rc);
1861 return rc;
1862}
1863
1864/**
1865 * afu_reset() - resets the AFU
1866 * @cfg: Internal structure associated with the host.
1867 *
1868 * Return:
1869 * 0 on success
1870 * A failure value from internal services.
1871 */
1872static int afu_reset(struct cxlflash_cfg *cfg)
1873{
1874 int rc = 0;
1875 /* Stop the context before the reset. Since the context is
1876 * no longer available, restart it after the reset is complete.
1877 */
1878
1879 term_afu(cfg);
1880
1881 rc = init_afu(cfg);
1882
1883 pr_debug("%s: returning rc=%d\n", __func__, rc);
1884 return rc;
1885}
1886
1887/**
1888 * cxlflash_eh_device_reset_handler() - reset a single LUN
1889 * @scp: SCSI command to send.
1890 *
1891 * Return:
1892 * SUCCESS as defined in scsi/scsi.h
1893 * FAILED as defined in scsi/scsi.h
1894 */
1895static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
1896{
1897 int rc = SUCCESS;
1898 struct Scsi_Host *host = scp->device->host;
1899 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1900 struct afu *afu = cfg->afu;
1901 int rcr = 0;
1902
1903 pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
1904 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
1905 host->host_no, scp->device->channel,
1906 scp->device->id, scp->device->lun,
1907 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1908 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1909 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1910 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1911
1912 switch (cfg->state) {
1913 case STATE_NORMAL:
1914 rcr = send_tmf(afu, scp, TMF_LUN_RESET);
1915 if (unlikely(rcr))
1916 rc = FAILED;
1917 break;
1918 case STATE_RESET:
1919 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
1920 if (cfg->state == STATE_NORMAL)
1921 break;
1922 /* fall through */
1923 default:
1924 rc = FAILED;
1925 break;
1926 }
1927
1928 pr_debug("%s: returning rc=%d\n", __func__, rc);
1929 return rc;
1930}
1931
1932/**
1933 * cxlflash_eh_host_reset_handler() - reset the host adapter
1934 * @scp: SCSI command from stack identifying host.
1935 *
1936 * Return:
1937 * SUCCESS as defined in scsi/scsi.h
1938 * FAILED as defined in scsi/scsi.h
1939 */
1940static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
1941{
1942 int rc = SUCCESS;
1943 int rcr = 0;
1944 struct Scsi_Host *host = scp->device->host;
1945 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1946
1947 pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
1948 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
1949 host->host_no, scp->device->channel,
1950 scp->device->id, scp->device->lun,
1951 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1952 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1953 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1954 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1955
1956 switch (cfg->state) {
1957 case STATE_NORMAL:
1958 cfg->state = STATE_RESET;
1959 scsi_block_requests(cfg->host);
1960 cxlflash_mark_contexts_error(cfg);
1961 rcr = afu_reset(cfg);
1962 if (rcr) {
1963 rc = FAILED;
1964 cfg->state = STATE_FAILTERM;
1965 } else
1966 cfg->state = STATE_NORMAL;
1967 wake_up_all(&cfg->reset_waitq);
1968 scsi_unblock_requests(cfg->host);
1969 break;
1970 case STATE_RESET:
1971 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
1972 if (cfg->state == STATE_NORMAL)
1973 break;
1974 /* fall through */
1975 default:
1976 rc = FAILED;
1977 break;
1978 }
1979
1980 pr_debug("%s: returning rc=%d\n", __func__, rc);
1981 return rc;
1982}
1983
1984/**
1985 * cxlflash_change_queue_depth() - change the queue depth for the device
1986 * @sdev: SCSI device destined for queue depth change.
1987 * @qdepth: Requested queue depth value to set.
1988 *
1989 * The requested queue depth is capped to the maximum supported value.
1990 *
1991 * Return: The actual queue depth set.
1992 */
1993static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
1994{
1995
1996 if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
1997 qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
1998
1999 scsi_change_queue_depth(sdev, qdepth);
2000 return sdev->queue_depth;
2001}
2002
2003/**
2004 * cxlflash_show_port_status() - queries and presents the current port status
2005 * @port: Desired port for status reporting.
2006 * @afu: AFU owning the specified port.
2007 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2008 *
2009 * Return: The size of the ASCII string returned in @buf.
2010 */
2011static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
2012{
2013 char *disp_status;
2014 u64 status;
2015 __be64 __iomem *fc_regs;
2016
2017 if (port >= NUM_FC_PORTS)
2018 return 0;
2019
2020 fc_regs = &afu->afu_map->global.fc_regs[port][0];
2021 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
2022 status &= FC_MTIP_STATUS_MASK;
2023
2024 if (status == FC_MTIP_STATUS_ONLINE)
2025 disp_status = "online";
2026 else if (status == FC_MTIP_STATUS_OFFLINE)
2027 disp_status = "offline";
2028 else
2029 disp_status = "unknown";
2030
2031 return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2032}
2033
2034/**
2035 * port0_show() - queries and presents the current status of port 0
2036 * @dev: Generic device associated with the host owning the port.
2037 * @attr: Device attribute representing the port.
2038 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2039 *
2040 * Return: The size of the ASCII string returned in @buf.
2041 */
2042static ssize_t port0_show(struct device *dev,
2043 struct device_attribute *attr,
2044 char *buf)
2045{
2046 struct Scsi_Host *shost = class_to_shost(dev);
2047 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2048 struct afu *afu = cfg->afu;
2049
2050 return cxlflash_show_port_status(0, afu, buf);
2051}
2052
2053/**
2054 * port1_show() - queries and presents the current status of port 1
2055 * @dev: Generic device associated with the host owning the port.
2056 * @attr: Device attribute representing the port.
2057 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2058 *
2059 * Return: The size of the ASCII string returned in @buf.
2060 */
2061static ssize_t port1_show(struct device *dev,
2062 struct device_attribute *attr,
2063 char *buf)
2064{
2065 struct Scsi_Host *shost = class_to_shost(dev);
2066 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2067 struct afu *afu = cfg->afu;
2068
2069 return cxlflash_show_port_status(1, afu, buf);
2070}
2071
2072/**
2073 * lun_mode_show() - presents the current LUN mode of the host
2074 * @dev: Generic device associated with the host.
2075 * @attr: Device attribute representing the LUN mode.
2076 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2077 *
2078 * Return: The size of the ASCII string returned in @buf.
2079 */
2080static ssize_t lun_mode_show(struct device *dev,
2081 struct device_attribute *attr, char *buf)
2082{
2083 struct Scsi_Host *shost = class_to_shost(dev);
2084 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2085 struct afu *afu = cfg->afu;
2086
2087 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2088}
2089
2090/**
2091 * lun_mode_store() - sets the LUN mode of the host
2092 * @dev: Generic device associated with the host.
2093 * @attr: Device attribute representing the LUN mode.
2094 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2095 * @count: Length of data residing in @buf.
2096 *
2097 * The CXL Flash AFU supports a dummy LUN mode where the external
2098 * links and storage are not required. Space on the FPGA is used
2099 * to create 1 or 2 small LUNs which are presented to the system
2100 * as if they were a normal storage device. This feature is useful
2101 * during development and also provides manufacturing with a way
2102 * to test the AFU without an actual device.
2103 *
2104 * 0 = external LUN[s] (default)
2105 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2106 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2107 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2108 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2109 *
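 * For example, with the adapter registered as SCSI host "host0", the
 * internal LUN mode would typically be selected via this sysfs attribute:
 *
 *     echo 1 > /sys/class/scsi_host/host0/lun_mode
 *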
2110 * Return: The number of bytes consumed from @buf (i.e. @count).
2111 */
2112static ssize_t lun_mode_store(struct device *dev,
2113 struct device_attribute *attr,
2114 const char *buf, size_t count)
2115{
2116 struct Scsi_Host *shost = class_to_shost(dev);
2117 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2118 struct afu *afu = cfg->afu;
2119 int rc;
2120 u32 lun_mode;
2121
2122 rc = kstrtouint(buf, 10, &lun_mode);
2123 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2124 afu->internal_lun = lun_mode;
2125 afu_reset(cfg);
2126 scsi_scan_host(cfg->host);
2127 }
2128
2129 return count;
2130}
2131
2132/**
2133 * ioctl_version_show() - presents the current ioctl version of the host
2134 * @dev: Generic device associated with the host.
2135 * @attr: Device attribute representing the ioctl version.
2136 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
2137 *
2138 * Return: The size of the ASCII string returned in @buf.
2139 */
2140static ssize_t ioctl_version_show(struct device *dev,
2141 struct device_attribute *attr, char *buf)
2142{
2143 return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
2144}
2145
2146/**
2147 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2148 * @port: Desired port for status reporting.
2149 * @afu: AFU owning the specified port.
2150 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2151 *
2152 * Return: The size of the ASCII string returned in @buf.
2153 */
2154static ssize_t cxlflash_show_port_lun_table(u32 port,
2155 struct afu *afu,
2156 char *buf)
2157{
2158 int i;
2159 ssize_t bytes = 0;
2160 __be64 __iomem *fc_port;
2161
2162 if (port >= NUM_FC_PORTS)
2163 return 0;
2164
2165 fc_port = &afu->afu_map->global.fc_port[port][0];
2166
2167 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2168 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2169 "%03d: %016llX\n", i, readq_be(&fc_port[i]));
2170 return bytes;
2171}
2172
2173/**
2174 * port0_lun_table_show() - presents the current LUN table of port 0
2175 * @dev: Generic device associated with the host owning the port.
2176 * @attr: Device attribute representing the port.
2177 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2178 *
2179 * Return: The size of the ASCII string returned in @buf.
2180 */
2181static ssize_t port0_lun_table_show(struct device *dev,
2182 struct device_attribute *attr,
2183 char *buf)
2184{
2185 struct Scsi_Host *shost = class_to_shost(dev);
2186 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2187 struct afu *afu = cfg->afu;
2188
2189 return cxlflash_show_port_lun_table(0, afu, buf);
2190}
2191
2192/**
2193 * port1_lun_table_show() - presents the current LUN table of port 1
2194 * @dev: Generic device associated with the host owning the port.
2195 * @attr: Device attribute representing the port.
2196 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2197 *
2198 * Return: The size of the ASCII string returned in @buf.
2199 */
2200static ssize_t port1_lun_table_show(struct device *dev,
2201 struct device_attribute *attr,
2202 char *buf)
2203{
2204 struct Scsi_Host *shost = class_to_shost(dev);
2205 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2206 struct afu *afu = cfg->afu;
2207
2208 return cxlflash_show_port_lun_table(1, afu, buf);
2209}
2210
2211/**
2212 * mode_show() - presents the current mode of the device
2213 * @dev: Generic device associated with the device.
2214 * @attr: Device attribute representing the device mode.
2215 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
2216 *
2217 * Return: The size of the ASCII string returned in @buf.
2218 */
2219static ssize_t mode_show(struct device *dev,
2220 struct device_attribute *attr, char *buf)
2221{
2222 struct scsi_device *sdev = to_scsi_device(dev);
2223
2224 return scnprintf(buf, PAGE_SIZE, "%s\n",
2225 sdev->hostdata ? "superpipe" : "legacy");
2226}
2227
2228/*
2229 * Host attributes
2230 */
2231static DEVICE_ATTR_RO(port0);
2232static DEVICE_ATTR_RO(port1);
2233static DEVICE_ATTR_RW(lun_mode);
2234static DEVICE_ATTR_RO(ioctl_version);
2235static DEVICE_ATTR_RO(port0_lun_table);
2236static DEVICE_ATTR_RO(port1_lun_table);
2237
2238static struct device_attribute *cxlflash_host_attrs[] = {
2239 &dev_attr_port0,
2240 &dev_attr_port1,
2241 &dev_attr_lun_mode,
2242 &dev_attr_ioctl_version,
2243 &dev_attr_port0_lun_table,
2244 &dev_attr_port1_lun_table,
2245 NULL
2246};
2247
2248/*
2249 * Device attributes
2250 */
2251static DEVICE_ATTR_RO(mode);
2252
2253static struct device_attribute *cxlflash_dev_attrs[] = {
2254 &dev_attr_mode,
2255 NULL
2256};
2257
2258/*
2259 * Host template
2260 */
2261static struct scsi_host_template driver_template = {
2262 .module = THIS_MODULE,
2263 .name = CXLFLASH_ADAPTER_NAME,
2264 .info = cxlflash_driver_info,
2265 .ioctl = cxlflash_ioctl,
2266 .proc_name = CXLFLASH_NAME,
2267 .queuecommand = cxlflash_queuecommand,
2268 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
2269 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
2270 .change_queue_depth = cxlflash_change_queue_depth,
2271 .cmd_per_lun = 16,
2272 .can_queue = CXLFLASH_MAX_CMDS,
2273 .this_id = -1,
2274 .sg_tablesize = SG_NONE, /* No scatter gather support. */
2275 .max_sectors = CXLFLASH_MAX_SECTORS,
2276 .use_clustering = ENABLE_CLUSTERING,
2277 .shost_attrs = cxlflash_host_attrs,
2278 .sdev_attrs = cxlflash_dev_attrs,
2279};
2280
2281/*
2282 * Device dependent values
2283 */
2284static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
2285
2286/*
2287 * PCI device binding table
2288 */
2289static struct pci_device_id cxlflash_pci_table[] = {
2290 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
2291 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
2292 {}
2293};
2294
2295MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
2296
2297/**
2298 * cxlflash_worker_thread() - work thread handler for the AFU
2299 * @work: Work structure contained within the cxlflash_cfg associated with the host.
2300 *
2301 * Handles the following events:
2302 * - Link reset which cannot be performed on interrupt context due to
2303 * blocking up to a few seconds
2304 * - Read AFU command room
2305 */
2306static void cxlflash_worker_thread(struct work_struct *work)
2307{
2308 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2309 work_q);
2310 struct afu *afu = cfg->afu;
2311 struct device *dev = &cfg->dev->dev;
2312 int port;
2313 ulong lock_flags;
2314
2315 /* Avoid MMIO if the device has failed */
2316
2317 if (cfg->state != STATE_NORMAL)
2318 return;
2319
2320 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2321
2322 if (cfg->lr_state == LINK_RESET_REQUIRED) {
2323 port = cfg->lr_port;
2324 if (port < 0)
2325 dev_err(dev, "%s: invalid port index %d\n",
2326 __func__, port);
2327 else {
2328 spin_unlock_irqrestore(cfg->host->host_lock,
2329 lock_flags);
2330
2331 /* The reset can block... */
2332 afu_link_reset(afu, port,
2333 &afu->afu_map->
2334 global.fc_regs[port][0]);
2335 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2336 }
2337
2338 cfg->lr_state = LINK_RESET_COMPLETE;
2339 }
2340
2341 if (afu->read_room) {
2342 atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
2343 afu->read_room = false;
2344 }
2345
2346 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2347}
2348
2349/**
2350 * cxlflash_probe() - PCI entry point to add host
2351 * @pdev: PCI device associated with the host.
2352 * @dev_id: PCI device id associated with device.
2353 *
2354 * Return: 0 on success / non-zero on failure
2355 */
2356static int cxlflash_probe(struct pci_dev *pdev,
2357 const struct pci_device_id *dev_id)
2358{
2359 struct Scsi_Host *host;
2360 struct cxlflash_cfg *cfg = NULL;
2361 struct device *phys_dev;
2362 struct dev_dependent_vals *ddv;
2363 int rc = 0;
2364
2365 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2366 __func__, pdev->irq);
2367
2368 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2369 driver_template.max_sectors = ddv->max_sectors;
2370
2371 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2372 if (!host) {
2373 dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
2374 __func__);
2375 rc = -ENOMEM;
2376 goto out;
2377 }
2378
2379 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2380 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2381 host->max_channel = NUM_FC_PORTS - 1;
2382 host->unique_id = host->host_no;
2383 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2384
2385 cfg = (struct cxlflash_cfg *)host->hostdata;
2386 cfg->host = host;
2387 rc = alloc_mem(cfg);
2388 if (rc) {
2389 dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
2390 __func__);
2391 rc = -ENOMEM;
2392 goto out;
2393 }
2394
2395 cfg->init_state = INIT_STATE_NONE;
2396 cfg->dev = pdev;
2397
2398 /*
2399 * The promoted LUNs move to the top of the LUN table. The rest stay
2400 * on the bottom half. The bottom half grows from the end
2401 * (index = 255), whereas the top half grows from the beginning
2402 * (index = 0).
2403 */
2404 cfg->promote_lun_index = 0;
2405 cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
2406 cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
2407
2408 cfg->dev_id = (struct pci_device_id *)dev_id;
2409 cfg->mcctx = NULL;
2410
2411 init_waitqueue_head(&cfg->tmf_waitq);
2412 init_waitqueue_head(&cfg->reset_waitq);
2413
2414 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
2415 cfg->lr_state = LINK_RESET_INVALID;
2416 cfg->lr_port = -1;
2417 mutex_init(&cfg->ctx_tbl_list_mutex);
2418 mutex_init(&cfg->ctx_recovery_mutex);
2419 init_rwsem(&cfg->ioctl_rwsem);
2420 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
2421 INIT_LIST_HEAD(&cfg->lluns);
2422
2423 pci_set_drvdata(pdev, cfg);
2424
2425 /* Use the special service provided to look up the physical
2426 * PCI device, since we are called on the probe of the virtual
2427 * PCI host bus (vphb)
2428 */
2429 phys_dev = cxl_get_phys_dev(pdev);
2430 if (!dev_is_pci(phys_dev)) {
2431 dev_err(&pdev->dev, "%s: not a pci dev\n", __func__);
2432 rc = -ENODEV;
2433 goto out_remove;
2434 }
2435 cfg->parent_dev = to_pci_dev(phys_dev);
2436
2437 cfg->cxl_afu = cxl_pci_to_afu(pdev);
2438
2439 rc = init_pci(cfg);
2440 if (rc) {
2441 dev_err(&pdev->dev, "%s: call to init_pci "
2442 "failed rc=%d!\n", __func__, rc);
2443 goto out_remove;
2444 }
2445 cfg->init_state = INIT_STATE_PCI;
2446
2447 rc = init_afu(cfg);
2448 if (rc) {
2449 dev_err(&pdev->dev, "%s: call to init_afu "
2450 "failed rc=%d!\n", __func__, rc);
2451 goto out_remove;
2452 }
2453 cfg->init_state = INIT_STATE_AFU;
2454
2455
2456 rc = init_scsi(cfg);
2457 if (rc) {
2458 dev_err(&pdev->dev, "%s: call to init_scsi "
2459 "failed rc=%d!\n", __func__, rc);
2460 goto out_remove;
2461 }
2462 cfg->init_state = INIT_STATE_SCSI;
2463
2464out:
2465 pr_debug("%s: returning rc=%d\n", __func__, rc);
2466 return rc;
2467
2468out_remove:
2469 cxlflash_remove(pdev);
2470 goto out;
2471}
2472
2473/**
2474 * drain_ioctls() - wait until all currently executing ioctls have completed
2475 * @cfg: Internal structure associated with the host.
2476 *
2477 * Obtain write access to read/write semaphore that wraps ioctl
2478 * handling to 'drain' ioctls currently executing.
2479 */
2480static void drain_ioctls(struct cxlflash_cfg *cfg)
2481{
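 /*
  * Every ioctl holds ioctl_rwsem for read while it runs, so taking it for
  * write completes only once all in-flight ioctls have finished; dropping
  * it immediately afterwards allows new ioctls to proceed.
  */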
2482 down_write(&cfg->ioctl_rwsem);
2483 up_write(&cfg->ioctl_rwsem);
2484}
2485
2486/**
2487 * cxlflash_pci_error_detected() - called when a PCI error is detected
2488 * @pdev: PCI device struct.
2489 * @state: PCI channel state.
2490 *
2491 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2492 */
2493static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2494 pci_channel_state_t state)
2495{
2496 int rc = 0;
2497 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2498 struct device *dev = &cfg->dev->dev;
2499
2500 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
2501
2502 switch (state) {
2503 case pci_channel_io_frozen:
2504 cfg->state = STATE_RESET;
2505 scsi_block_requests(cfg->host);
2506 drain_ioctls(cfg);
2507 rc = cxlflash_mark_contexts_error(cfg);
2508 if (unlikely(rc))
2509 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2510 __func__, rc);
2511 term_mc(cfg, UNDO_START);
2512 stop_afu(cfg);
2513 return PCI_ERS_RESULT_NEED_RESET;
2514 case pci_channel_io_perm_failure:
2515 cfg->state = STATE_FAILTERM;
2516 wake_up_all(&cfg->reset_waitq);
2517 scsi_unblock_requests(cfg->host);
2518 return PCI_ERS_RESULT_DISCONNECT;
2519 default:
2520 break;
2521 }
2522 return PCI_ERS_RESULT_NEED_RESET;
2523}
2524
2525/**
2526 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
2527 * @pdev: PCI device struct.
2528 *
2529 * This routine is called by the pci error recovery code after the PCI
2530 * slot has been reset, just before we should resume normal operations.
2531 *
2532 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
2533 */
2534static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2535{
2536 int rc = 0;
2537 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2538 struct device *dev = &cfg->dev->dev;
2539
2540 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2541
2542 rc = init_afu(cfg);
2543 if (unlikely(rc)) {
2544 dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
2545 return PCI_ERS_RESULT_DISCONNECT;
2546 }
2547
2548 return PCI_ERS_RESULT_RECOVERED;
2549}
2550
2551/**
2552 * cxlflash_pci_resume() - called when normal operation can resume
2553 * @pdev: PCI device struct
2554 */
2555static void cxlflash_pci_resume(struct pci_dev *pdev)
2556{
2557 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2558 struct device *dev = &cfg->dev->dev;
2559
2560 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2561
2562 cfg->state = STATE_NORMAL;
2563 wake_up_all(&cfg->reset_waitq);
2564 scsi_unblock_requests(cfg->host);
2565}
2566
2567static const struct pci_error_handlers cxlflash_err_handler = {
2568 .error_detected = cxlflash_pci_error_detected,
2569 .slot_reset = cxlflash_pci_slot_reset,
2570 .resume = cxlflash_pci_resume,
2571};
2572
2573/*
2574 * PCI device structure
2575 */
2576static struct pci_driver cxlflash_driver = {
2577 .name = CXLFLASH_NAME,
2578 .id_table = cxlflash_pci_table,
2579 .probe = cxlflash_probe,
2580 .remove = cxlflash_remove,
2581 .err_handler = &cxlflash_err_handler,
2582};
2583
2584/**
2585 * init_cxlflash() - module entry point
2586 *
2587 * Return: 0 on success / non-zero on failure
2588 */
2589static int __init init_cxlflash(void)
2590{
2591 pr_info("%s: IBM Power CXL Flash Adapter: %s\n",
2592 __func__, CXLFLASH_DRIVER_DATE);
2593
2594 cxlflash_list_init();
2595
2596 return pci_register_driver(&cxlflash_driver);
2597}
2598
2599/**
2600 * exit_cxlflash() - module exit point
2601 */
2602static void __exit exit_cxlflash(void)
2603{
2604 cxlflash_term_global_luns();
2605 cxlflash_free_errpage();
2606
2607 pci_unregister_driver(&cxlflash_driver);
2608}
2609
2610module_init(init_cxlflash);
2611module_exit(exit_cxlflash);