/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * Prepares and submits a command that has either completed or timed out to
 * the SCSI stack. For commands with an associated SCSI command (cmd->scp
 * populated), the SCSI result is set and scsi_done() is invoked; internal
 * commands are signaled through their completion event.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
				     "ioasc=%d\n", __func__, scp, scp->result,
				     cmd->sa.ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset_ioarrin() - reset command owner context via IOARRIN register
 * @cmd: AFU command that timed out.
 */
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	writeq_be(rrin, &afu->host_map->ioarrin);
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
		__func__, rrin, nretry);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&afu->rrin_slock, lock_flags);
	if (--afu->room < 0) {
		room = readq_be(&afu->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			afu->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		afu->room = room - 1;
	}

	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 *
 * Return:
 *	0 on success, -1 on timeout/error
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	int rc = 0;
	/* Allow the AFU twice the command's timeout before resetting context */
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout) {
		afu->context_reset(cmd);
		rc = -1;
	}

	if (unlikely(cmd->sa.ioasc != 0)) {
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
		rc = -1;
	}

	return rc;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu: AFU to checkout from.
 * @scp: SCSI command from stack.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	u32 port_sel = scp->device->channel + 1;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

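/**
 * afu_unmap() - kref release handler that unmaps the AFU problem state area
 * @ref: kref counter embedded within the AFU.
 *
 * Invoked when the last reference to the AFU MMIO map is dropped.
 */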
static void afu_unmap(struct kref *ref)
{
	struct afu *afu = container_of(ref, struct afu, mapcount);

	if (likely(afu->afu_map)) {
		cxl_psa_unmap((void __iomem *)afu->afu_map);
		afu->afu_map = NULL;
	}
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	u32 port_sel = scp->device->channel + 1;
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int nseg = 0;
	int rc = 0;
	int kref_got = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	kref_get(&cfg->afu->mapcount);
	kref_got = 1;

	if (likely(sg)) {
		nseg = scsi_dma_map(scp);
		if (unlikely(nseg < 0)) {
			dev_err(dev, "%s: Fail DMA map!\n", __func__);
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		/* The data buffer is described by a single SG element */
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc))
		scsi_dma_unmap(scp);
out:
	if (kref_got)
		kref_put(&afu->mapcount, afu_unmap);
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Waits for any active internal AFU commands to timeout and then unmaps
 * the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);
		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
		kref_put(&afu->mapcount, afu_unmap);
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
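		/* fall through */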
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
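		/* fall through */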
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
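		/* fall through */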
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	rc = cxl_stop_context(cfg->mcctx);
	WARN_ON(rc);
	cfg->mcctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts
	 * 2) Unmap the problem state area
	 * 3) Stop the master context
	 */
	term_intr(cfg, UNMAP_THREE);
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg);

	pr_debug("%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg: Internal structure associated with the host.
 * @wait: Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shut down
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_global_map __iomem *global;
	struct dev_dependent_vals *ddv;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: The problem state area is not mapped\n",
			__func__);
		return;
	}

	global = &afu->afu_map->global;

	/* Notify AFU */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		retry_cnt = 0;
		while (true) {
			status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		pr_debug("%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		cancel_work_sync(&cfg->work_q);
		term_afu(cfg);
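		/* fall through */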
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
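		/* fall through */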
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn: The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go online timed out\n",
			 __func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place. A failure
 * to come online is overridden.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 *
 * Each entry maps an asynchronous interrupt status bit to a description of
 * the error, the FC port it applies to, and the recovery action(s) to take.
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status: Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}
| 1017 | |
| 1018 | /** |
| 1019 | * afu_err_intr_init() - clears and initializes the AFU for error interrupts |
| 1020 | * @afu: AFU associated with the host. |
| 1021 | */ |
| 1022 | static void afu_err_intr_init(struct afu *afu) |
| 1023 | { |
| 1024 | int i; |
| 1025 | u64 reg; |
| 1026 | |
| 1027 | /* global async interrupts: AFU clears afu_ctrl on context exit |
| 1028 | * if async interrupts were sent to that context. This prevents |
| 1029 | * the AFU from sending further async interrupts when there is |
| 1030 | * nobody to receive them. |
| 1032 | */ |
| 1033 | |
| 1034 | /* mask all */ |
| 1035 | writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask); |
| 1036 | /* set LISN# to send and point to master context */ |
| 1037 | reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40); |
| 1038 | |
| 1039 | if (afu->internal_lun) |
| 1040 | reg |= 1; /* Bit 63 indicates local lun */ |
| 1041 | writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl); |
| 1042 | /* clear all */ |
| 1043 | writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); |
| 1044 | /* unmask bits that are of interest */ |
| 1045 | /* note: afu can send an interrupt after this step */ |
| 1046 | writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask); |
| 1047 | /* clear again in case a bit came on after previous clear but before */ |
| 1048 | /* unmask */ |
| 1049 | writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); |
| 1050 | |
| 1051 | /* Clear/Set internal lun bits */ |
| 1052 | reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]); |
| 1053 | reg &= SISL_FC_INTERNAL_MASK; |
| 1054 | if (afu->internal_lun) |
| 1055 | reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT); |
| 1056 | writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]); |
| 1057 | |
| 1058 | /* now clear FC errors */ |
| 1059 | for (i = 0; i < NUM_FC_PORTS; i++) { |
| 1060 | writeq_be(0xFFFFFFFFU, |
| 1061 | &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]); |
| 1062 | writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]); |
| 1063 | } |
| 1064 | |
| 1065 | /* sync interrupts for master's IOARRIN write */ |
| 1066 | /* note that unlike asyncs, there can be no pending sync interrupts */ |
| 1067 | /* at this time (this is a fresh context and master has not written */ |
| 1068 | /* IOARRIN yet), so there is nothing to clear. */ |
| 1069 | |
| 1070 | /* set LISN#, it is always sent to the context that wrote IOARRIN */ |
| 1071 | writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl); |
| 1072 | writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask); |
| 1073 | } |
| 1074 | |
| 1075 | /** |
| 1076 | * cxlflash_sync_err_irq() - interrupt handler for synchronous errors |
| 1077 | * @irq: Interrupt number. |
| 1078 | * @data: Private data provided at interrupt registration, the AFU. |
| 1079 | * |
| 1080 | * Return: Always returns IRQ_HANDLED. |
| 1081 | */ |
| 1082 | static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) |
| 1083 | { |
| 1084 | struct afu *afu = (struct afu *)data; |
| 1085 | u64 reg; |
| 1086 | u64 reg_unmasked; |
| 1087 | |
| 1088 | reg = readq_be(&afu->host_map->intr_status); |
| 1089 | reg_unmasked = (reg & SISL_ISTATUS_UNMASK); |
| 1090 | |
| 1091 | if (reg_unmasked == 0UL) { |
| 1092 | pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n", |
| 1093 | __func__, (u64)afu, reg); |
| 1094 | goto cxlflash_sync_err_irq_exit; |
| 1095 | } |
| 1096 | |
| 1097 | pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n", |
| 1098 | __func__, (u64)afu, reg); |
| 1099 | |
| 1100 | writeq_be(reg_unmasked, &afu->host_map->intr_clear); |
| 1101 | |
| 1102 | cxlflash_sync_err_irq_exit: |
| 1103 | pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED); |
| 1104 | return IRQ_HANDLED; |
| 1105 | } |
| 1106 | |
| 1107 | /** |
| 1108 | * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) |
| 1109 | * @irq: Interrupt number. |
| 1110 | * @data: Private data provided at interrupt registration, the AFU. |
| 1111 | * |
| 1112 | * Return: Always returns IRQ_HANDLED. |
| 1113 | */ |
| 1114 | static irqreturn_t cxlflash_rrq_irq(int irq, void *data) |
| 1115 | { |
| 1116 | struct afu *afu = (struct afu *)data; |
| 1117 | struct afu_cmd *cmd; |
| 1118 | bool toggle = afu->toggle; |
| 1119 | u64 entry, |
| 1120 | *hrrq_start = afu->hrrq_start, |
| 1121 | *hrrq_end = afu->hrrq_end, |
| 1122 | *hrrq_curr = afu->hrrq_curr; |
| 1123 | |
| 1124 | /* Process however many RRQ entries that are ready */ |
| 1125 | while (true) { |
| 1126 | entry = *hrrq_curr; |
| 1127 | |
| 1128 | if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle) |
| 1129 | break; |
| 1130 | |
| 1131 | cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT); |
| 1132 | cmd_complete(cmd); |
| 1133 | |
| 1134 | /* Advance to next entry or wrap and flip the toggle bit */ |
| 1135 | if (hrrq_curr < hrrq_end) |
| 1136 | hrrq_curr++; |
| 1137 | else { |
| 1138 | hrrq_curr = hrrq_start; |
| 1139 | toggle ^= SISL_RESP_HANDLE_T_BIT; |
| 1140 | } |
| 1141 | } |
| 1142 | |
| 1143 | afu->hrrq_curr = hrrq_curr; |
| 1144 | afu->toggle = toggle; |
| 1145 | |
| 1146 | return IRQ_HANDLED; |
| 1147 | } |
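| | |
| | /* |
| |  * Illustrative sketch only (not driver code): with a small 4-entry RRQ the |
| |  * toggle protocol above plays out as follows. On the first pass the host |
| |  * consumes entries whose T bit matches toggle = 1; after the wrap, toggle |
| |  * flips to 0, so stale first-pass entries are skipped until the AFU |
| |  * overwrites them with fresh completions carrying T = 0: |
| |  * |
| |  *	pass 1, toggle = 1:  [cmd T=1] [cmd T=1] [cmd T=1] [cmd T=1] |
| |  *	wrap              -> toggle ^= 1  (now 0) |
| |  *	pass 2, toggle = 0:  [cmd T=0] [cmd T=0] ... |
| |  */ |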
| 1148 | |
| 1149 | /** |
| 1150 | * cxlflash_async_err_irq() - interrupt handler for asynchronous errors |
| 1151 | * @irq: Interrupt number. |
| 1152 | * @data: Private data provided at interrupt registration, the AFU. |
| 1153 | * |
| 1154 | * Return: Always returns IRQ_HANDLED. |
| 1155 | */ |
| 1156 | static irqreturn_t cxlflash_async_err_irq(int irq, void *data) |
| 1157 | { |
| 1158 | struct afu *afu = (struct afu *)data; |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1159 | struct cxlflash_cfg *cfg = afu->parent; |
| 1160 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1161 | u64 reg_unmasked; |
| 1162 | const struct asyc_intr_info *info; |
Matthew R. Ochs | 1786f4a | 2015-10-21 15:14:48 -0500 | [diff] [blame] | 1163 | struct sisl_global_map __iomem *global = &afu->afu_map->global; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1164 | u64 reg; |
| 1165 | u8 port; |
| 1166 | int i; |
| 1167 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1168 | reg = readq_be(&global->regs.aintr_status); |
| 1169 | reg_unmasked = (reg & SISL_ASTATUS_UNMASK); |
| 1170 | |
| 1171 | if (reg_unmasked == 0) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1172 | dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n", |
| 1173 | __func__, reg); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1174 | goto out; |
| 1175 | } |
| 1176 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1177 | /* FYI, it is 'okay' to clear AFU status before FC_ERROR */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1178 | writeq_be(reg_unmasked, &global->regs.aintr_clear); |
| 1179 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1180 | /* Check each bit that is on */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1181 | for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) { |
| 1182 | info = find_ainfo(1ULL << i); |
Matthew R. Ochs | 16798d3 | 2015-10-21 15:13:45 -0500 | [diff] [blame] | 1183 | if (((reg_unmasked & 0x1) == 0) || !info) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1184 | continue; |
| 1185 | |
| 1186 | port = info->port; |
| 1187 | |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1188 | dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n", |
| 1189 | __func__, port, info->desc, |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1190 | readq_be(&global->fc_regs[port][FC_STATUS / 8])); |
| 1191 | |
| 1192 | /* |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1193 | * Do the link reset first; some OTHER errors will set FC_ERROR |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1194 | * again if cleared before or without a reset. |
| 1195 | */ |
| 1196 | if (info->action & LINK_RESET) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1197 | dev_err(dev, "%s: FC Port %d: resetting link\n", |
| 1198 | __func__, port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1199 | cfg->lr_state = LINK_RESET_REQUIRED; |
| 1200 | cfg->lr_port = port; |
Manoj Kumar | b45cdbaf | 2015-12-14 15:07:23 -0600 | [diff] [blame] | 1201 | kref_get(&cfg->afu->mapcount); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1202 | schedule_work(&cfg->work_q); |
| 1203 | } |
| 1204 | |
| 1205 | if (info->action & CLR_FC_ERROR) { |
| 1206 | reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]); |
| 1207 | |
| 1208 | /* |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1209 | * Since all errors are unmasked, FC_ERROR and FC_ERRCAP |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1210 | * should be the same and tracing one is sufficient. |
| 1211 | */ |
| 1212 | |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1213 | dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n", |
| 1214 | __func__, port, reg); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1215 | |
| 1216 | writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]); |
| 1217 | writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]); |
| 1218 | } |
Matthew R. Ochs | ef51074 | 2015-10-21 15:13:37 -0500 | [diff] [blame] | 1219 | |
| 1220 | if (info->action & SCAN_HOST) { |
| 1221 | atomic_inc(&cfg->scan_host_needed); |
Manoj Kumar | b45cdbaf | 2015-12-14 15:07:23 -0600 | [diff] [blame] | 1222 | kref_get(&cfg->afu->mapcount); |
Matthew R. Ochs | ef51074 | 2015-10-21 15:13:37 -0500 | [diff] [blame] | 1223 | schedule_work(&cfg->work_q); |
| 1224 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1225 | } |
| 1226 | |
| 1227 | out: |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1228 | dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1229 | return IRQ_HANDLED; |
| 1230 | } |
| 1231 | |
| 1232 | /** |
| 1233 | * start_context() - starts the master context |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1234 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1235 | * |
| 1236 | * Return: A success or failure value from CXL services. |
| 1237 | */ |
| 1238 | static int start_context(struct cxlflash_cfg *cfg) |
| 1239 | { |
| 1240 | int rc = 0; |
| 1241 | |
| 1242 | rc = cxl_start_context(cfg->mcctx, |
| 1243 | cfg->afu->work.work_element_descriptor, |
| 1244 | NULL); |
| 1245 | |
| 1246 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 1247 | return rc; |
| 1248 | } |
| 1249 | |
| 1250 | /** |
| 1251 | * read_vpd() - obtains the WWPNs from VPD |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1252 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1253 | * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs. |
| 1254 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1255 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1256 | */ |
| 1257 | static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) |
| 1258 | { |
Frederic Barrat | ca946d4e | 2016-03-04 12:26:43 +0100 | [diff] [blame] | 1259 | struct pci_dev *dev = cfg->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1260 | int rc = 0; |
| 1261 | int ro_start, ro_size, i, j, k; |
| 1262 | ssize_t vpd_size; |
| 1263 | char vpd_data[CXLFLASH_VPD_LEN]; |
| 1264 | char tmp_buf[WWPN_BUF_LEN] = { 0 }; |
| 1265 | char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" }; |
| 1266 | |
| 1267 | /* Get the VPD data from the device */ |
Frederic Barrat | ca946d4e | 2016-03-04 12:26:43 +0100 | [diff] [blame] | 1268 | vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data)); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1269 | if (unlikely(vpd_size <= 0)) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1270 | dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n", |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1271 | __func__, vpd_size); |
| 1272 | rc = -ENODEV; |
| 1273 | goto out; |
| 1274 | } |
| 1275 | |
| 1276 | /* Get the read only section offset */ |
| 1277 | ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, |
| 1278 | PCI_VPD_LRDT_RO_DATA); |
| 1279 | if (unlikely(ro_start < 0)) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1280 | dev_err(&dev->dev, "%s: VPD Read-only data not found\n", |
| 1281 | __func__); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1282 | rc = -ENODEV; |
| 1283 | goto out; |
| 1284 | } |
| 1285 | |
| 1286 | /* Get the read only section size, cap when extends beyond read VPD */ |
| 1287 | ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); |
| 1288 | j = ro_size; |
| 1289 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; |
| 1290 | if (unlikely((i + j) > vpd_size)) { |
| 1291 | pr_debug("%s: Might need to read more VPD (%d > %ld)\n", |
| 1292 | __func__, (i + j), vpd_size); |
| 1293 | ro_size = vpd_size - i; |
| 1294 | } |
| 1295 | |
| 1296 | /* |
| 1297 | * Find the offset of the WWPN tag within the read only |
| 1298 | * VPD data and validate the found field (partials are |
| 1299 | * no good to us). Convert the ASCII data to an integer |
| 1300 | * value. Note that we must copy to a temporary buffer |
| 1301 | * because the conversion service requires that the ASCII |
| 1302 | * string be terminated. |
| 1303 | */ |
| 1304 | for (k = 0; k < NUM_FC_PORTS; k++) { |
| 1305 | j = ro_size; |
| 1306 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; |
| 1307 | |
| 1308 | i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); |
| 1309 | if (unlikely(i < 0)) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1310 | dev_err(&dev->dev, "%s: Port %d WWPN not found " |
| 1311 | "in VPD\n", __func__, k); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1312 | rc = -ENODEV; |
| 1313 | goto out; |
| 1314 | } |
| 1315 | |
| 1316 | j = pci_vpd_info_field_size(&vpd_data[i]); |
| 1317 | i += PCI_VPD_INFO_FLD_HDR_SIZE; |
| 1318 | if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1319 | dev_err(&dev->dev, "%s: Port %d WWPN incomplete or " |
| 1320 | "VPD corrupt\n", |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1321 | __func__, k); |
| 1322 | rc = -ENODEV; |
| 1323 | goto out; |
| 1324 | } |
| 1325 | |
| 1326 | memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); |
| 1327 | rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); |
| 1328 | if (unlikely(rc)) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1329 | dev_err(&dev->dev, "%s: Failed to convert port %d WWPN " |
| 1330 | "to integer\n", __func__, k); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1331 | rc = -ENODEV; |
| 1332 | goto out; |
| 1333 | } |
| 1334 | } |
| 1335 | |
| 1336 | out: |
| 1337 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 1338 | return rc; |
| 1339 | } |
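| | |
| | /* |
| |  * For illustration only -- the example values below are made up and the |
| |  * authoritative layout comes from the adapter VPD: the read-only section |
| |  * parsed above carries one keyword field per port, e.g. |
| |  * |
| |  *	V5  len=16  "500507680B214A70"	(port 0 WWPN) |
| |  *	V6  len=16  "500507680B214A71"	(port 1 WWPN) |
| |  * |
| |  * Each 16-character ASCII value is copied to a NUL-terminated temporary |
| |  * buffer and converted by kstrtoul() with WWPN_LEN as the base, which is |
| |  * valid because WWPN_LEN is 16 (hexadecimal). |
| |  */ |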
| 1340 | |
| 1341 | /** |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1342 | * init_pcr() - initialize the provisioning and control registers |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1343 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1344 | * |
| 1345 | * Also sets up fast access to the mapped registers and initializes AFU |
| 1346 | * command fields that never change. |
| 1347 | */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1348 | static void init_pcr(struct cxlflash_cfg *cfg) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1349 | { |
| 1350 | struct afu *afu = cfg->afu; |
Matthew R. Ochs | 1786f4a | 2015-10-21 15:14:48 -0500 | [diff] [blame] | 1351 | struct sisl_ctrl_map __iomem *ctrl_map; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1352 | int i; |
| 1353 | |
| 1354 | for (i = 0; i < MAX_CONTEXT; i++) { |
| 1355 | ctrl_map = &afu->afu_map->ctrls[i].ctrl; |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1356 | /* Disrupt any clients that could be running */ |
| 1357 | /* e.g. clients that survived a master restart */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1358 | writeq_be(0, &ctrl_map->rht_start); |
| 1359 | writeq_be(0, &ctrl_map->rht_cnt_id); |
| 1360 | writeq_be(0, &ctrl_map->ctx_cap); |
| 1361 | } |
| 1362 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1363 | /* Copy frequently used fields into afu */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1364 | afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1365 | afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host; |
| 1366 | afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl; |
| 1367 | |
| 1368 | /* Program the Endian Control for the master context */ |
| 1369 | writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1370 | } |
| 1371 | |
| 1372 | /** |
| 1373 | * init_global() - initialize AFU global registers |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1374 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1375 | */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1376 | static int init_global(struct cxlflash_cfg *cfg) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1377 | { |
| 1378 | struct afu *afu = cfg->afu; |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1379 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1380 | u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */ |
| 1381 | int i = 0, num_ports = 0; |
| 1382 | int rc = 0; |
| 1383 | u64 reg; |
| 1384 | |
| 1385 | rc = read_vpd(cfg, &wwpn[0]); |
| 1386 | if (rc) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1387 | dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1388 | goto out; |
| 1389 | } |
| 1390 | |
| 1391 | pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]); |
| 1392 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1393 | /* Set up RRQ in AFU for master issued cmds */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1394 | writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start); |
| 1395 | writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end); |
| 1396 | |
| 1397 | /* AFU configuration */ |
| 1398 | reg = readq_be(&afu->afu_map->global.regs.afu_config); |
| 1399 | reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; |
| 1400 | /* enable all auto retry options and control endianness */ |
| 1401 | /* leave others at default: */ |
| 1402 | /* CTX_CAP write protected, mbox_r does not clear on read and */ |
| 1403 | /* checker on if dual afu */ |
| 1404 | writeq_be(reg, &afu->afu_map->global.regs.afu_config); |
| 1405 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1406 | /* Global port select: select either port */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1407 | if (afu->internal_lun) { |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1408 | /* Only use port 0 */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1409 | writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); |
| 1410 | num_ports = NUM_FC_PORTS - 1; |
| 1411 | } else { |
| 1412 | writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel); |
| 1413 | num_ports = NUM_FC_PORTS; |
| 1414 | } |
| 1415 | |
| 1416 | for (i = 0; i < num_ports; i++) { |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1417 | /* Unmask all errors (but they are still masked at AFU) */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1418 | writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]); |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1419 | /* Clear CRC error cnt & set a threshold */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1420 | (void)readq_be(&afu->afu_map->global. |
| 1421 | fc_regs[i][FC_CNT_CRCERR / 8]); |
| 1422 | writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i] |
| 1423 | [FC_CRC_THRESH / 8]); |
| 1424 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1425 | /* Set WWPNs. If already programmed, wwpn[i] is 0 */ |
Matthew R. Ochs | f801326 | 2016-09-02 15:40:20 -0500 | [diff] [blame] | 1426 | if (wwpn[i] != 0) |
| 1427 | afu_set_wwpn(afu, i, |
| 1428 | &afu->afu_map->global.fc_regs[i][0], |
| 1429 | wwpn[i]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1430 | /* Programming WWPN back to back causes additional |
| 1431 | * offline/online transitions and a PLOGI |
| 1432 | */ |
| 1433 | msleep(100); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1434 | } |
| 1435 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1436 | /* Set up master's own CTX_CAP to allow real mode, host translation */ |
| 1437 | /* tables, afu cmds and read/write GSCSI cmds. */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1438 | /* First, unlock ctx_cap write by reading mbox */ |
| 1439 | (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */ |
| 1440 | writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | |
| 1441 | SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | |
| 1442 | SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), |
| 1443 | &afu->ctrl_map->ctx_cap); |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1444 | /* Initialize heartbeat */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1445 | afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); |
| 1446 | |
| 1447 | out: |
| 1448 | return rc; |
| 1449 | } |
| 1450 | |
| 1451 | /** |
| 1452 | * start_afu() - initializes and starts the AFU |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1453 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1454 | */ |
| 1455 | static int start_afu(struct cxlflash_cfg *cfg) |
| 1456 | { |
| 1457 | struct afu *afu = cfg->afu; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1458 | int rc = 0; |
| 1459 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1460 | init_pcr(cfg); |
| 1461 | |
Matthew R. Ochs | af10483 | 2015-10-21 15:15:14 -0500 | [diff] [blame] | 1462 | /* After an AFU reset, RRQ entries are stale, clear them */ |
| 1463 | memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry)); |
| 1464 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1465 | /* Initialize RRQ pointers */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1466 | afu->hrrq_start = &afu->rrq_entry[0]; |
| 1467 | afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1]; |
| 1468 | afu->hrrq_curr = afu->hrrq_start; |
| 1469 | afu->toggle = 1; |
| 1470 | |
| 1471 | rc = init_global(cfg); |
| 1472 | |
| 1473 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 1474 | return rc; |
| 1475 | } |
| 1476 | |
| 1477 | /** |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1478 | * init_intr() - setup interrupt handlers for the master context |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1479 | * @cfg: Internal structure associated with the host. |
| | * @ctx: Context (master) against which the AFU interrupts are allocated and mapped. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1480 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1481 | * Return: The undo level for term_intr() on failure, UNDO_NOOP on success. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1482 | */ |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1483 | static enum undo_level init_intr(struct cxlflash_cfg *cfg, |
| 1484 | struct cxl_context *ctx) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1485 | { |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1486 | struct afu *afu = cfg->afu; |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1487 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1488 | int rc = 0; |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1489 | enum undo_level level = UNDO_NOOP; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1490 | |
| 1491 | rc = cxl_allocate_afu_irqs(ctx, 3); |
| 1492 | if (unlikely(rc)) { |
| 1493 | dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n", |
| 1494 | __func__, rc); |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1495 | level = UNDO_NOOP; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1496 | goto out; |
| 1497 | } |
| 1498 | |
| 1499 | rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu, |
| 1500 | "SISL_MSI_SYNC_ERROR"); |
| 1501 | if (unlikely(rc <= 0)) { |
| 1502 | dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n", |
| 1503 | __func__); |
| 1504 | level = FREE_IRQ; |
| 1505 | goto out; |
| 1506 | } |
| 1507 | |
| 1508 | rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu, |
| 1509 | "SISL_MSI_RRQ_UPDATED"); |
| 1510 | if (unlikely(rc <= 0)) { |
| 1511 | dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n", |
| 1512 | __func__); |
| 1513 | level = UNMAP_ONE; |
| 1514 | goto out; |
| 1515 | } |
| 1516 | |
| 1517 | rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu, |
| 1518 | "SISL_MSI_ASYNC_ERROR"); |
| 1519 | if (unlikely(rc <= 0)) { |
| 1520 | dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n", |
| 1521 | __func__); |
| 1522 | level = UNMAP_TWO; |
| 1523 | goto out; |
| 1524 | } |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1525 | out: |
| 1526 | return level; |
| 1527 | } |
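| | |
| | /* |
| |  * Sketch of how the level returned above maps to cleanup (term_intr() holds |
| |  * the authoritative unwind logic); each value reflects exactly what had been |
| |  * acquired before the failure: |
| |  * |
| |  *	UNDO_NOOP  - nothing to undo (IRQ allocation itself failed) |
| |  *	FREE_IRQ   - IRQs allocated, none mapped |
| |  *	UNMAP_ONE  - IRQs allocated, IRQ 1 mapped |
| |  *	UNMAP_TWO  - IRQs allocated, IRQs 1 and 2 mapped |
| |  */ |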
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1528 | |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1529 | /** |
| 1530 | * init_mc() - create and register as the master context |
| 1531 | * @cfg: Internal structure associated with the host. |
| 1532 | * |
| 1533 | * Return: 0 on success, -errno on failure |
| 1534 | */ |
| 1535 | static int init_mc(struct cxlflash_cfg *cfg) |
| 1536 | { |
| 1537 | struct cxl_context *ctx; |
| 1538 | struct device *dev = &cfg->dev->dev; |
| 1539 | int rc = 0; |
| 1540 | enum undo_level level; |
| 1541 | |
| 1542 | ctx = cxl_get_context(cfg->dev); |
| 1543 | if (unlikely(!ctx)) { |
| 1544 | rc = -ENOMEM; |
| 1545 | goto ret; |
| 1546 | } |
| 1547 | cfg->mcctx = ctx; |
| 1548 | |
| 1549 | /* Set it up as a master with the CXL */ |
| 1550 | cxl_set_master(ctx); |
| 1551 | |
| 1552 | /* During initialization reset the AFU to start from a clean slate */ |
| 1553 | rc = cxl_afu_reset(cfg->mcctx); |
| 1554 | if (unlikely(rc)) { |
| 1555 | dev_err(dev, "%s: initial AFU reset failed rc=%d\n", |
| 1556 | __func__, rc); |
| 1557 | goto ret; |
| 1558 | } |
| 1559 | |
| 1560 | level = init_intr(cfg, ctx); |
| 1561 | if (unlikely(level)) { |
| 1562 | dev_err(dev, "%s: setting up interrupts failed level=%d\n", |
| 1563 | __func__, level); |
| | rc = -ENODEV; |
| 1564 | goto out; |
| 1565 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1566 | |
| 1567 | /* This performs the equivalent of the CXL_IOCTL_START_WORK. |
| 1568 | * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process |
| 1569 | * element (pe) that is embedded in the context (ctx) |
| 1570 | */ |
| 1571 | rc = start_context(cfg); |
| 1572 | if (unlikely(rc)) { |
| 1573 | dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); |
| 1574 | level = UNMAP_THREE; |
| 1575 | goto out; |
| 1576 | } |
| 1577 | ret: |
| 1578 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 1579 | return rc; |
| 1580 | out: |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1581 | term_intr(cfg, level); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1582 | goto ret; |
| 1583 | } |
| 1584 | |
| 1585 | /** |
| 1586 | * init_afu() - setup as master context and start AFU |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1587 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1588 | * |
| 1589 | * This routine is a higher level of control for configuring the |
| 1590 | * AFU on probe and reset paths. |
| 1591 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1592 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1593 | */ |
| 1594 | static int init_afu(struct cxlflash_cfg *cfg) |
| 1595 | { |
| 1596 | u64 reg; |
| 1597 | int rc = 0; |
| 1598 | struct afu *afu = cfg->afu; |
| 1599 | struct device *dev = &cfg->dev->dev; |
| 1600 | |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 1601 | cxl_perst_reloads_same_image(cfg->cxl_afu, true); |
| 1602 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1603 | rc = init_mc(cfg); |
| 1604 | if (rc) { |
| 1605 | dev_err(dev, "%s: call to init_mc failed, rc=%d!\n", |
| 1606 | __func__, rc); |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1607 | goto out; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1608 | } |
| 1609 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1610 | /* Map the entire MMIO space of the AFU */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1611 | afu->afu_map = cxl_psa_map(cfg->mcctx); |
| 1612 | if (!afu->afu_map) { |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1613 | dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__); |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1614 | rc = -ENOMEM; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1615 | goto err1; |
| 1616 | } |
Manoj Kumar | b45cdbaf | 2015-12-14 15:07:23 -0600 | [diff] [blame] | 1617 | kref_init(&afu->mapcount); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1618 | |
Matthew R. Ochs | e5ce067 | 2015-10-21 15:14:01 -0500 | [diff] [blame] | 1619 | /* No byte reverse on reading afu_version or string will be backwards */ |
| 1620 | reg = readq(&afu->afu_map->global.regs.afu_version); |
| 1621 | memcpy(afu->version, ®, sizeof(reg)); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1622 | afu->interface_version = |
| 1623 | readq_be(&afu->afu_map->global.regs.interface_version); |
Matthew R. Ochs | e5ce067 | 2015-10-21 15:14:01 -0500 | [diff] [blame] | 1624 | if ((afu->interface_version + 1) == 0) { |
| 1625 | pr_err("Back level AFU, please upgrade. AFU version %s " |
| 1626 | "interface version 0x%llx\n", afu->version, |
| 1627 | afu->interface_version); |
| 1628 | rc = -EINVAL; |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1629 | goto err2; |
| 1630 | } |
| 1631 | |
Matthew R. Ochs | 48b4be3 | 2016-11-28 18:43:09 -0600 | [diff] [blame] | 1632 | afu->send_cmd = send_cmd_ioarrin; |
| 1633 | afu->context_reset = context_reset_ioarrin; |
| 1634 | |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1635 | pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__, |
| 1636 | afu->version, afu->interface_version); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1637 | |
| 1638 | rc = start_afu(cfg); |
| 1639 | if (rc) { |
| 1640 | dev_err(dev, "%s: call to start_afu failed, rc=%d!\n", |
| 1641 | __func__, rc); |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1642 | goto err2; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1643 | } |
| 1644 | |
| 1645 | afu_err_intr_init(cfg->afu); |
Uma Krishnan | 11f7b18 | 2016-11-28 18:41:45 -0600 | [diff] [blame] | 1646 | spin_lock_init(&afu->rrin_slock); |
| 1647 | afu->room = readq_be(&afu->host_map->cmd_room); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1648 | |
Matthew R. Ochs | 2cb7926 | 2015-08-13 21:47:53 -0500 | [diff] [blame] | 1649 | /* Restore the LUN mappings */ |
| 1650 | cxlflash_restore_luntable(cfg); |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1651 | out: |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1652 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 1653 | return rc; |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1654 | |
| 1655 | err2: |
Manoj Kumar | b45cdbaf | 2015-12-14 15:07:23 -0600 | [diff] [blame] | 1656 | kref_put(&afu->mapcount, afu_unmap); |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1657 | err1: |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1658 | term_intr(cfg, UNMAP_THREE); |
| 1659 | term_mc(cfg); |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1660 | goto out; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1661 | } |
| 1662 | |
| 1663 | /** |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1664 | * cxlflash_afu_sync() - builds and sends an AFU sync command |
| 1665 | * @afu: AFU associated with the host. |
| 1666 | * @ctx_hndl_u: Identifies context requesting sync. |
| 1667 | * @res_hndl_u: Identifies resource requesting sync. |
| 1668 | * @mode: Type of sync to issue (lightweight, heavyweight, global). |
| 1669 | * |
| 1670 | * The AFU can only take 1 sync command at a time. This routine enforces this |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1671 | * limitation by using a mutex to provide exclusive access to the AFU during |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1672 | * the sync. This design point requires calling threads to not be on interrupt |
| 1673 | * context due to the possibility of sleeping during concurrent sync operations. |
| 1674 | * |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 1675 | * AFU sync operations are only necessary and allowed when the device is |
| 1676 | * operating normally. When not operating normally, sync requests can occur as |
| 1677 | * part of cleaning up resources associated with an adapter prior to removal. |
| 1678 | * In this scenario, these requests are simply ignored (safe due to the AFU |
| 1679 | * going away). |
| 1680 | * |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1681 | * Return: |
| 1682 | * 0 on success |
| 1683 | * -1 on failure |
| 1684 | */ |
| 1685 | int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, |
| 1686 | res_hndl_t res_hndl_u, u8 mode) |
| 1687 | { |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 1688 | struct cxlflash_cfg *cfg = afu->parent; |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1689 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1690 | struct afu_cmd *cmd = NULL; |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 1691 | char *buf = NULL; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1692 | int rc = 0; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1693 | static DEFINE_MUTEX(sync_active); |
| 1694 | |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 1695 | if (cfg->state != STATE_NORMAL) { |
| 1696 | pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state); |
| 1697 | return 0; |
| 1698 | } |
| 1699 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1700 | mutex_lock(&sync_active); |
Matthew R. Ochs | de01283 | 2016-11-28 18:42:33 -0600 | [diff] [blame] | 1701 | atomic_inc(&afu->cmds_active); |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 1702 | buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); |
| 1703 | if (unlikely(!buf)) { |
| 1704 | dev_err(dev, "%s: no memory for command\n", __func__); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1705 | rc = -1; |
| 1706 | goto out; |
| 1707 | } |
| 1708 | |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 1709 | cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); |
| 1710 | init_completion(&cmd->cevent); |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 1711 | cmd->parent = afu; |
| 1712 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1713 | pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); |
| 1714 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1715 | cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 1716 | cmd->rcb.ctx_id = afu->ctx_hndl; |
| 1717 | cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1718 | cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; |
| 1719 | |
| 1720 | cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ |
| 1721 | cmd->rcb.cdb[1] = mode; |
| 1722 | |
| 1723 | /* The cdb is aligned, no unaligned accessors required */ |
Matthew R. Ochs | 1786f4a | 2015-10-21 15:14:48 -0500 | [diff] [blame] | 1724 | *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u); |
| 1725 | *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1726 | |
Matthew R. Ochs | 48b4be3 | 2016-11-28 18:43:09 -0600 | [diff] [blame] | 1727 | rc = afu->send_cmd(afu, cmd); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1728 | if (unlikely(rc)) |
| 1729 | goto out; |
| 1730 | |
Matthew R. Ochs | 9ba848a | 2016-11-28 18:42:42 -0600 | [diff] [blame] | 1731 | rc = wait_resp(afu, cmd); |
| 1732 | if (unlikely(rc)) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1733 | rc = -1; |
| 1734 | out: |
Matthew R. Ochs | de01283 | 2016-11-28 18:42:33 -0600 | [diff] [blame] | 1735 | atomic_dec(&afu->cmds_active); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1736 | mutex_unlock(&sync_active); |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 1737 | kfree(buf); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1738 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 1739 | return rc; |
| 1740 | } |
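| | |
| | /* |
| |  * Usage sketch (illustrative, not a new code path): the superpipe/vlun code |
| |  * issues scoped syncs when translation entries change, along the lines of |
| |  * |
| |  *	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); |
| |  * |
| |  * whereas the link reset path earlier in this file uses a global sync with |
| |  * no handles: cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC). |
| |  */ |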
| 1741 | |
| 1742 | /** |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1743 | * afu_reset() - resets the AFU |
| 1744 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1745 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1746 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1747 | */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1748 | static int afu_reset(struct cxlflash_cfg *cfg) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1749 | { |
| 1750 | int rc = 0; |
| 1751 | /* Stop the context before the reset. Since the context is |
| 1752 | * no longer available, restart it after the reset is complete. |
| 1753 | */ |
| 1754 | |
| 1755 | term_afu(cfg); |
| 1756 | |
| 1757 | rc = init_afu(cfg); |
| 1758 | |
| 1759 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 1760 | return rc; |
| 1761 | } |
| 1762 | |
| 1763 | /** |
Manoj N. Kumar | f411396 | 2016-06-15 18:49:20 -0500 | [diff] [blame] | 1764 | * drain_ioctls() - wait until all currently executing ioctls have completed |
| 1765 | * @cfg: Internal structure associated with the host. |
| 1766 | * |
| 1767 | * Obtain write access to read/write semaphore that wraps ioctl |
| 1768 | * handling to 'drain' ioctls currently executing. |
| 1769 | */ |
| 1770 | static void drain_ioctls(struct cxlflash_cfg *cfg) |
| 1771 | { |
| 1772 | down_write(&cfg->ioctl_rwsem); |
| 1773 | up_write(&cfg->ioctl_rwsem); |
| 1774 | } |
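| | |
| | /* |
| |  * Pairing sketch: the ioctl entry points elsewhere in this driver take |
| |  * down_read(&cfg->ioctl_rwsem) for the duration of each ioctl, so the write |
| |  * lock above simply waits for every in-flight reader to finish and blocks |
| |  * new ones only for the instant it is held. |
| |  */ |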
| 1775 | |
| 1776 | /** |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1777 | * cxlflash_eh_device_reset_handler() - reset a single LUN |
| 1778 | * @scp: SCSI command to send. |
| 1779 | * |
| 1780 | * Return: |
| 1781 | * SUCCESS as defined in scsi/scsi.h |
| 1782 | * FAILED as defined in scsi/scsi.h |
| 1783 | */ |
| 1784 | static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) |
| 1785 | { |
| 1786 | int rc = SUCCESS; |
| 1787 | struct Scsi_Host *host = scp->device->host; |
| 1788 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; |
| 1789 | struct afu *afu = cfg->afu; |
| 1790 | int rcr = 0; |
| 1791 | |
| 1792 | pr_debug("%s: (scp=%p) %d/%d/%d/%llu " |
| 1793 | "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, |
| 1794 | host->host_no, scp->device->channel, |
| 1795 | scp->device->id, scp->device->lun, |
| 1796 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), |
| 1797 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), |
| 1798 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), |
| 1799 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); |
| 1800 | |
Matthew R. Ochs | ed486da | 2015-10-21 15:14:24 -0500 | [diff] [blame] | 1801 | retry: |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1802 | switch (cfg->state) { |
| 1803 | case STATE_NORMAL: |
| 1804 | rcr = send_tmf(afu, scp, TMF_LUN_RESET); |
| 1805 | if (unlikely(rcr)) |
| 1806 | rc = FAILED; |
| 1807 | break; |
| 1808 | case STATE_RESET: |
| 1809 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
Matthew R. Ochs | ed486da | 2015-10-21 15:14:24 -0500 | [diff] [blame] | 1810 | goto retry; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1811 | default: |
| 1812 | rc = FAILED; |
| 1813 | break; |
| 1814 | } |
| 1815 | |
| 1816 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 1817 | return rc; |
| 1818 | } |
| 1819 | |
| 1820 | /** |
| 1821 | * cxlflash_eh_host_reset_handler() - reset the host adapter |
| 1822 | * @scp: SCSI command from stack identifying host. |
| 1823 | * |
Matthew R. Ochs | 1d3324c | 2016-09-02 15:39:30 -0500 | [diff] [blame] | 1824 | * Following a reset, the state is evaluated again in case an EEH occurred |
| 1825 | * during the reset. In such a scenario, the host reset will either yield |
| 1826 | * until the EEH recovery is complete or return success or failure based |
| 1827 | * upon the current device state. |
| 1828 | * |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1829 | * Return: |
| 1830 | * SUCCESS as defined in scsi/scsi.h |
| 1831 | * FAILED as defined in scsi/scsi.h |
| 1832 | */ |
| 1833 | static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) |
| 1834 | { |
| 1835 | int rc = SUCCESS; |
| 1836 | int rcr = 0; |
| 1837 | struct Scsi_Host *host = scp->device->host; |
| 1838 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; |
| 1839 | |
| 1840 | pr_debug("%s: (scp=%p) %d/%d/%d/%llu " |
| 1841 | "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, |
| 1842 | host->host_no, scp->device->channel, |
| 1843 | scp->device->id, scp->device->lun, |
| 1844 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), |
| 1845 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), |
| 1846 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), |
| 1847 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); |
| 1848 | |
| 1849 | switch (cfg->state) { |
| 1850 | case STATE_NORMAL: |
| 1851 | cfg->state = STATE_RESET; |
Manoj N. Kumar | f411396 | 2016-06-15 18:49:20 -0500 | [diff] [blame] | 1852 | drain_ioctls(cfg); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1853 | cxlflash_mark_contexts_error(cfg); |
| 1854 | rcr = afu_reset(cfg); |
| 1855 | if (rcr) { |
| 1856 | rc = FAILED; |
| 1857 | cfg->state = STATE_FAILTERM; |
| 1858 | } else |
| 1859 | cfg->state = STATE_NORMAL; |
| 1860 | wake_up_all(&cfg->reset_waitq); |
Matthew R. Ochs | 1d3324c | 2016-09-02 15:39:30 -0500 | [diff] [blame] | 1861 | ssleep(1); |
| 1862 | /* fall through */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1863 | case STATE_RESET: |
| 1864 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
| 1865 | if (cfg->state == STATE_NORMAL) |
| 1866 | break; |
| 1867 | /* fall through */ |
| 1868 | default: |
| 1869 | rc = FAILED; |
| 1870 | break; |
| 1871 | } |
| 1872 | |
| 1873 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 1874 | return rc; |
| 1875 | } |
| 1876 | |
| 1877 | /** |
| 1878 | * cxlflash_change_queue_depth() - change the queue depth for the device |
| 1879 | * @sdev: SCSI device destined for queue depth change. |
| 1880 | * @qdepth: Requested queue depth value to set. |
| 1881 | * |
| 1882 | * The requested queue depth is capped to the maximum supported value. |
| 1883 | * |
| 1884 | * Return: The actual queue depth set. |
| 1885 | */ |
| 1886 | static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) |
| 1887 | { |
| 1889 | if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) |
| 1890 | qdepth = CXLFLASH_MAX_CMDS_PER_LUN; |
| 1891 | |
| 1892 | scsi_change_queue_depth(sdev, qdepth); |
| 1893 | return sdev->queue_depth; |
| 1894 | } |
| 1895 | |
| 1896 | /** |
| 1897 | * cxlflash_show_port_status() - queries and presents the current port status |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1898 | * @port: Desired port for status reporting. |
| 1899 | * @afu: AFU owning the specified port. |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1900 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 1901 | * |
| 1902 | * Return: The size of the ASCII string returned in @buf. |
| 1903 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1904 | static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1905 | { |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1906 | char *disp_status; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1907 | u64 status; |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1908 | __be64 __iomem *fc_regs; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1909 | |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1910 | if (port >= NUM_FC_PORTS) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1911 | return 0; |
| 1912 | |
| 1913 | fc_regs = &afu->afu_map->global.fc_regs[port][0]; |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1914 | status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); |
| 1915 | status &= FC_MTIP_STATUS_MASK; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1916 | |
| 1917 | if (status == FC_MTIP_STATUS_ONLINE) |
| 1918 | disp_status = "online"; |
| 1919 | else if (status == FC_MTIP_STATUS_OFFLINE) |
| 1920 | disp_status = "offline"; |
| 1921 | else |
| 1922 | disp_status = "unknown"; |
| 1923 | |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1924 | return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1925 | } |
| 1926 | |
| 1927 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1928 | * port0_show() - queries and presents the current status of port 0 |
| 1929 | * @dev: Generic device associated with the host owning the port. |
| 1930 | * @attr: Device attribute representing the port. |
| 1931 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1932 | * |
| 1933 | * Return: The size of the ASCII string returned in @buf. |
| 1934 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1935 | static ssize_t port0_show(struct device *dev, |
| 1936 | struct device_attribute *attr, |
| 1937 | char *buf) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1938 | { |
| 1939 | struct Scsi_Host *shost = class_to_shost(dev); |
| 1940 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; |
| 1941 | struct afu *afu = cfg->afu; |
| 1942 | |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1943 | return cxlflash_show_port_status(0, afu, buf); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1944 | } |
| 1945 | |
| 1946 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1947 | * port1_show() - queries and presents the current status of port 1 |
| 1948 | * @dev: Generic device associated with the host owning the port. |
| 1949 | * @attr: Device attribute representing the port. |
| 1950 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 1951 | * |
| 1952 | * Return: The size of the ASCII string returned in @buf. |
| 1953 | */ |
| 1954 | static ssize_t port1_show(struct device *dev, |
| 1955 | struct device_attribute *attr, |
| 1956 | char *buf) |
| 1957 | { |
| 1958 | struct Scsi_Host *shost = class_to_shost(dev); |
| 1959 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; |
| 1960 | struct afu *afu = cfg->afu; |
| 1961 | |
| 1962 | return cxlflash_show_port_status(1, afu, buf); |
| 1963 | } |
| 1964 | |
| 1965 | /** |
| 1966 | * lun_mode_show() - presents the current LUN mode of the host |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1967 | * @dev: Generic device associated with the host. |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 1968 | * @attr: Device attribute representing the LUN mode. |
| 1969 | * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. |
| 1970 | * |
| 1971 | * Return: The size of the ASCII string returned in @buf. |
| 1972 | */ |
| 1973 | static ssize_t lun_mode_show(struct device *dev, |
| 1974 | struct device_attribute *attr, char *buf) |
| 1975 | { |
| 1976 | struct Scsi_Host *shost = class_to_shost(dev); |
| 1977 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; |
| 1978 | struct afu *afu = cfg->afu; |
| 1979 | |
| 1980 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); |
| 1981 | } |
| 1982 | |
| 1983 | /** |
| 1984 | * lun_mode_store() - sets the LUN mode of the host |
| 1985 | * @dev: Generic device associated with the host. |
| 1986 | * @attr: Device attribute representing the LUN mode. |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1987 | * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII. |
| 1988 | * @count: Length of data residing in @buf. |
| 1989 | * |
| 1990 | * The CXL Flash AFU supports a dummy LUN mode where the external |
| 1991 | * links and storage are not required. Space on the FPGA is used |
| 1992 | * to create 1 or 2 small LUNs which are presented to the system |
| 1993 | * as if they were a normal storage device. This feature is useful |
| 1994 | * during development and also provides manufacturing with a way |
| 1995 | * to test the AFU without an actual device. |
| 1996 | * |
| 1997 | * 0 = external LUN[s] (default) |
| 1998 | * 1 = internal LUN (1 x 64K, 512B blocks, id 0) |
| 1999 | * 2 = internal LUN (1 x 64K, 4K blocks, id 0) |
| 2000 | * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1) |
| 2001 | * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1) |
| 2002 | * |
| 2003 | * Return: The number of bytes consumed from @buf. |
| 2004 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2005 | static ssize_t lun_mode_store(struct device *dev, |
| 2006 | struct device_attribute *attr, |
| 2007 | const char *buf, size_t count) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2008 | { |
| 2009 | struct Scsi_Host *shost = class_to_shost(dev); |
| 2010 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; |
| 2011 | struct afu *afu = cfg->afu; |
| 2012 | int rc; |
| 2013 | u32 lun_mode; |
| 2014 | |
| 2015 | rc = kstrtouint(buf, 10, &lun_mode); |
| 2016 | if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) { |
| 2017 | afu->internal_lun = lun_mode; |
Manoj N. Kumar | 603ecce | 2016-03-04 15:55:19 -0600 | [diff] [blame] | 2018 | |
| 2019 | /* |
| 2020 | * When configured for internal LUN, there is only one channel, |
| 2021 | * channel number 0, else there will be 2 (default). |
| 2022 | */ |
| 2023 | if (afu->internal_lun) |
| 2024 | shost->max_channel = 0; |
| 2025 | else |
| 2026 | shost->max_channel = NUM_FC_PORTS - 1; |
| 2027 | |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2028 | afu_reset(cfg); |
| 2029 | scsi_scan_host(cfg->host); |
| 2030 | } |
| 2031 | |
| 2032 | return count; |
| 2033 | } |
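| | |
| | /* |
| |  * Usage sketch (assuming the standard scsi_host sysfs location for these |
| |  * host attributes): switching to a single internal LUN with 4K blocks is |
| |  * done from userspace, e.g. |
| |  * |
| |  *	echo 2 > /sys/class/scsi_host/host<N>/lun_mode |
| |  */ |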
| 2034 | |
| 2035 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2036 | * ioctl_version_show() - presents the current ioctl version of the host |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2037 | * @dev: Generic device associated with the host. |
| 2038 | * @attr: Device attribute representing the ioctl version. |
| 2039 | * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. |
| 2040 | * |
| 2041 | * Return: The size of the ASCII string returned in @buf. |
| 2042 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2043 | static ssize_t ioctl_version_show(struct device *dev, |
| 2044 | struct device_attribute *attr, char *buf) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2045 | { |
| 2046 | return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0); |
| 2047 | } |
| 2048 | |
| 2049 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2050 | * cxlflash_show_port_lun_table() - queries and presents the port LUN table |
| 2051 | * @port: Desired port for status reporting. |
| 2052 | * @afu: AFU owning the specified port. |
| 2053 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2054 | * |
| 2055 | * Return: The size of the ASCII string returned in @buf. |
| 2056 | */ |
| 2057 | static ssize_t cxlflash_show_port_lun_table(u32 port, |
| 2058 | struct afu *afu, |
| 2059 | char *buf) |
| 2060 | { |
| 2061 | int i; |
| 2062 | ssize_t bytes = 0; |
| 2063 | __be64 __iomem *fc_port; |
| 2064 | |
| 2065 | if (port >= NUM_FC_PORTS) |
| 2066 | return 0; |
| 2067 | |
| 2068 | fc_port = &afu->afu_map->global.fc_port[port][0]; |
| 2069 | |
| 2070 | for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) |
| 2071 | bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, |
| 2072 | "%03d: %016llX\n", i, readq_be(&fc_port[i])); |
| 2073 | return bytes; |
| 2074 | } |
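
/*
 * A sketch of the output produced by the helper above for one port (values
 * shown are illustrative; entries are read directly from the AFU MMIO map):
 *
 *   000: 0000000000000000
 *   001: 0000000000000000
 *   ...
 *
 * Each line is the index into the port LUN table followed by the 64-bit
 * big-endian entry for that virtual LUN slot.
 */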
| 2075 | |
| 2076 | /** |
| 2077 | * port0_lun_table_show() - presents the current LUN table of port 0 |
| 2078 | * @dev: Generic device associated with the host owning the port. |
| 2079 | * @attr: Device attribute representing the port. |
| 2080 |  * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII. |
| 2081 | * |
| 2082 | * Return: The size of the ASCII string returned in @buf. |
| 2083 | */ |
| 2084 | static ssize_t port0_lun_table_show(struct device *dev, |
| 2085 | struct device_attribute *attr, |
| 2086 | char *buf) |
| 2087 | { |
| 2088 | struct Scsi_Host *shost = class_to_shost(dev); |
| 2089 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; |
| 2090 | struct afu *afu = cfg->afu; |
| 2091 | |
| 2092 | return cxlflash_show_port_lun_table(0, afu, buf); |
| 2093 | } |
| 2094 | |
| 2095 | /** |
| 2096 | * port1_lun_table_show() - presents the current LUN table of port 1 |
| 2097 | * @dev: Generic device associated with the host owning the port. |
| 2098 | * @attr: Device attribute representing the port. |
| 2099 |  * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII. |
| 2100 | * |
| 2101 | * Return: The size of the ASCII string returned in @buf. |
| 2102 | */ |
| 2103 | static ssize_t port1_lun_table_show(struct device *dev, |
| 2104 | struct device_attribute *attr, |
| 2105 | char *buf) |
| 2106 | { |
| 2107 | struct Scsi_Host *shost = class_to_shost(dev); |
| 2108 | struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; |
| 2109 | struct afu *afu = cfg->afu; |
| 2110 | |
| 2111 | return cxlflash_show_port_lun_table(1, afu, buf); |
| 2112 | } |
| 2113 | |
| 2114 | /** |
| 2115 | * mode_show() - presents the current mode of the device |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2116 | * @dev: Generic device associated with the device. |
| 2117 | * @attr: Device attribute representing the device mode. |
| 2118 | * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII. |
| 2119 | * |
| 2120 | * Return: The size of the ASCII string returned in @buf. |
| 2121 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2122 | static ssize_t mode_show(struct device *dev, |
| 2123 | struct device_attribute *attr, char *buf) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2124 | { |
| 2125 | struct scsi_device *sdev = to_scsi_device(dev); |
| 2126 | |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2127 | return scnprintf(buf, PAGE_SIZE, "%s\n", |
| 2128 | sdev->hostdata ? "superpipe" : "legacy"); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2129 | } |
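
/*
 * Example (hypothetical device path): reading the per-device mode attribute
 * from user space reports how the LUN is currently being used:
 *
 *   # cat /sys/class/scsi_device/0:0:0:0/device/mode
 *   legacy
 *
 * The value switches to "superpipe" once user space attaches the LUN through
 * the cxlflash ioctl interface and sdev->hostdata is populated.
 */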
| 2130 | |
| 2131 | /* |
| 2132 | * Host attributes |
| 2133 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2134 | static DEVICE_ATTR_RO(port0); |
| 2135 | static DEVICE_ATTR_RO(port1); |
| 2136 | static DEVICE_ATTR_RW(lun_mode); |
| 2137 | static DEVICE_ATTR_RO(ioctl_version); |
| 2138 | static DEVICE_ATTR_RO(port0_lun_table); |
| 2139 | static DEVICE_ATTR_RO(port1_lun_table); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2140 | |
| 2141 | static struct device_attribute *cxlflash_host_attrs[] = { |
| 2142 | &dev_attr_port0, |
| 2143 | &dev_attr_port1, |
| 2144 | &dev_attr_lun_mode, |
| 2145 | &dev_attr_ioctl_version, |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2146 | &dev_attr_port0_lun_table, |
| 2147 | &dev_attr_port1_lun_table, |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2148 | NULL |
| 2149 | }; |
| 2150 | |
| 2151 | /* |
| 2152 | * Device attributes |
| 2153 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2154 | static DEVICE_ATTR_RO(mode); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2155 | |
| 2156 | static struct device_attribute *cxlflash_dev_attrs[] = { |
| 2157 | &dev_attr_mode, |
| 2158 | NULL |
| 2159 | }; |
| 2160 | |
| 2161 | /* |
| 2162 | * Host template |
| 2163 | */ |
| 2164 | static struct scsi_host_template driver_template = { |
| 2165 | .module = THIS_MODULE, |
| 2166 | .name = CXLFLASH_ADAPTER_NAME, |
| 2167 | .info = cxlflash_driver_info, |
| 2168 | .ioctl = cxlflash_ioctl, |
| 2169 | .proc_name = CXLFLASH_NAME, |
| 2170 | .queuecommand = cxlflash_queuecommand, |
| 2171 | .eh_device_reset_handler = cxlflash_eh_device_reset_handler, |
| 2172 | .eh_host_reset_handler = cxlflash_eh_host_reset_handler, |
| 2173 | .change_queue_depth = cxlflash_change_queue_depth, |
Manoj N. Kumar | 8343083 | 2016-03-04 15:55:20 -0600 | [diff] [blame] | 2174 | .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2175 | .can_queue = CXLFLASH_MAX_CMDS, |
Matthew R. Ochs | 5fbb96c | 2016-11-28 18:42:19 -0600 | [diff] [blame] | 2176 | .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2177 | .this_id = -1, |
Uma Krishnan | 68ab2d7 | 2016-11-28 18:41:06 -0600 | [diff] [blame] | 2178 | .sg_tablesize = 1, /* No scatter gather support */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2179 | .max_sectors = CXLFLASH_MAX_SECTORS, |
| 2180 | .use_clustering = ENABLE_CLUSTERING, |
| 2181 | .shost_attrs = cxlflash_host_attrs, |
| 2182 | .sdev_attrs = cxlflash_dev_attrs, |
| 2183 | }; |
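
/*
 * Note on .cmd_size above: the SCSI midlayer allocates that many bytes of
 * per-command private data directly behind each struct scsi_cmnd, but it
 * does not guarantee the alignment that struct afu_cmd requires.  Padding
 * by __alignof__(struct afu_cmd) - 1 leaves room to align the AFU command
 * within that area.  A minimal sketch of recovering the aligned pointer
 * (helper name is illustrative):
 *
 *	static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
 *	{
 *		return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
 *	}
 */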
| 2184 | |
| 2185 | /* |
| 2186 | * Device dependent values |
| 2187 | */ |
Uma Krishnan | 96e1b66 | 2016-06-15 18:49:38 -0500 | [diff] [blame] | 2188 | static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, |
| 2189 | 0ULL }; |
| 2190 | static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, |
Uma Krishnan | 704c4b0 | 2016-06-15 18:49:57 -0500 | [diff] [blame] | 2191 | CXLFLASH_NOTIFY_SHUTDOWN }; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2192 | |
| 2193 | /* |
| 2194 | * PCI device binding table |
| 2195 | */ |
| 2196 | static struct pci_device_id cxlflash_pci_table[] = { |
| 2197 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, |
| 2198 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, |
Manoj Kumar | a2746fb | 2015-12-14 15:07:43 -0600 | [diff] [blame] | 2199 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, |
| 2200 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2201 | {} |
| 2202 | }; |
| 2203 | |
| 2204 | MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); |
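
/*
 * Each binding table entry's driver_data points at the dev_dependent_vals
 * for that card type; the probe routine casts it back to tune per-device
 * behavior, roughly as in this sketch:
 *
 *	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
 *	driver_template.max_sectors = ddv->max_sectors;
 */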
| 2205 | |
| 2206 | /** |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2207 | * cxlflash_worker_thread() - work thread handler for the AFU |
| 2208 | * @work: Work structure contained within cxlflash associated with host. |
| 2209 | * |
| 2210 | * Handles the following events: |
| 2211 |  * - Link reset, which cannot be performed in interrupt context because |
| 2212 |  *   it can block for up to a few seconds |
Matthew R. Ochs | ef51074 | 2015-10-21 15:13:37 -0500 | [diff] [blame] | 2213 | * - Rescan the host |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2214 | */ |
| 2215 | static void cxlflash_worker_thread(struct work_struct *work) |
| 2216 | { |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2217 | struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, |
| 2218 | work_q); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2219 | struct afu *afu = cfg->afu; |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 2220 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2221 | int port; |
| 2222 | ulong lock_flags; |
| 2223 | |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2224 | /* Avoid MMIO if the device has failed */ |
| 2225 | |
| 2226 | if (cfg->state != STATE_NORMAL) |
| 2227 | return; |
| 2228 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2229 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
| 2230 | |
| 2231 | if (cfg->lr_state == LINK_RESET_REQUIRED) { |
| 2232 | port = cfg->lr_port; |
| 2233 | if (port < 0) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 2234 | dev_err(dev, "%s: invalid port index %d\n", |
| 2235 | __func__, port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2236 | } else { |
| 2237 | spin_unlock_irqrestore(cfg->host->host_lock, |
| 2238 | lock_flags); |
| 2239 | |
| 2240 | /* The reset can block... */ |
| 2241 | afu_link_reset(afu, port, |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 2242 | &afu->afu_map->global.fc_regs[port][0]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2243 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
| 2244 | } |
| 2245 | |
| 2246 | cfg->lr_state = LINK_RESET_COMPLETE; |
| 2247 | } |
| 2248 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2249 | spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); |
Matthew R. Ochs | ef51074 | 2015-10-21 15:13:37 -0500 | [diff] [blame] | 2250 | |
| 2251 | if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) |
| 2252 | scsi_scan_host(cfg->host); |
Manoj Kumar | b45cdbaf | 2015-12-14 15:07:23 -0600 | [diff] [blame] | 2253 | kref_put(&afu->mapcount, afu_unmap); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2254 | } |
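
/*
 * A link reset is requested from interrupt context roughly as sketched
 * below (an illustration of the flow handled above, not a literal copy of
 * the interrupt handler): the handler records which port needs the reset,
 * takes a reference on the AFU mapping, and defers the blocking work to
 * the thread above, which drops that reference via kref_put().
 *
 *	cfg->lr_port = port;
 *	cfg->lr_state = LINK_RESET_REQUIRED;
 *	kref_get(&cfg->afu->mapcount);
 *	schedule_work(&cfg->work_q);
 */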
| 2255 | |
| 2256 | /** |
| 2257 | * cxlflash_probe() - PCI entry point to add host |
| 2258 | * @pdev: PCI device associated with the host. |
| 2259 | * @dev_id: PCI device id associated with device. |
| 2260 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 2261 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2262 | */ |
| 2263 | static int cxlflash_probe(struct pci_dev *pdev, |
| 2264 | const struct pci_device_id *dev_id) |
| 2265 | { |
| 2266 | struct Scsi_Host *host; |
| 2267 | struct cxlflash_cfg *cfg = NULL; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2268 | struct dev_dependent_vals *ddv; |
| 2269 | int rc = 0; |
| 2270 | |
| 2271 | dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", |
| 2272 | __func__, pdev->irq); |
| 2273 | |
| 2274 | ddv = (struct dev_dependent_vals *)dev_id->driver_data; |
| 2275 | driver_template.max_sectors = ddv->max_sectors; |
| 2276 | |
| 2277 | host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); |
| 2278 | if (!host) { |
| 2279 | dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n", |
| 2280 | __func__); |
| 2281 | rc = -ENOMEM; |
| 2282 | goto out; |
| 2283 | } |
| 2284 | |
| 2285 | host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; |
| 2286 | host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; |
| 2287 | host->max_channel = NUM_FC_PORTS - 1; |
| 2288 | host->unique_id = host->host_no; |
| 2289 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; |
| 2290 | |
| 2291 | cfg = (struct cxlflash_cfg *)host->hostdata; |
| 2292 | cfg->host = host; |
| 2293 | rc = alloc_mem(cfg); |
| 2294 | if (rc) { |
Matthew R. Ochs | fa3f2c6 | 2015-10-21 15:15:45 -0500 | [diff] [blame] | 2295 | dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n", |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2296 | __func__); |
| 2297 | rc = -ENOMEM; |
Matthew R. Ochs | 8b5b1e8 | 2015-10-21 15:14:09 -0500 | [diff] [blame] | 2298 | scsi_host_put(cfg->host); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2299 | goto out; |
| 2300 | } |
| 2301 | |
| 2302 | cfg->init_state = INIT_STATE_NONE; |
| 2303 | cfg->dev = pdev; |
Matthew R. Ochs | 17ead26 | 2015-10-21 15:15:37 -0500 | [diff] [blame] | 2304 | cfg->cxl_fops = cxlflash_cxl_fops; |
Matthew R. Ochs | 2cb7926 | 2015-08-13 21:47:53 -0500 | [diff] [blame] | 2305 | |
| 2306 | /* |
| 2307 | * The promoted LUNs move to the top of the LUN table. The rest stay |
| 2308 | * on the bottom half. The bottom half grows from the end |
| 2309 | * (index = 255), whereas the top half grows from the beginning |
| 2310 | * (index = 0). |
| 2311 | */ |
| 2312 | cfg->promote_lun_index = 0; |
| 2313 | cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1; |
| 2314 | cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1; |
| 2315 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2316 | cfg->dev_id = (struct pci_device_id *)dev_id; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2317 | |
| 2318 | init_waitqueue_head(&cfg->tmf_waitq); |
Matthew R. Ochs | 439e85c | 2015-10-21 15:12:00 -0500 | [diff] [blame] | 2319 | init_waitqueue_head(&cfg->reset_waitq); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2320 | |
| 2321 | INIT_WORK(&cfg->work_q, cxlflash_worker_thread); |
| 2322 | cfg->lr_state = LINK_RESET_INVALID; |
| 2323 | cfg->lr_port = -1; |
Matthew R. Ochs | 0d73122 | 2015-10-21 15:16:24 -0500 | [diff] [blame] | 2324 | spin_lock_init(&cfg->tmf_slock); |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2325 | mutex_init(&cfg->ctx_tbl_list_mutex); |
| 2326 | mutex_init(&cfg->ctx_recovery_mutex); |
Matthew R. Ochs | 0a27ae5 | 2015-10-21 15:11:52 -0500 | [diff] [blame] | 2327 | init_rwsem(&cfg->ioctl_rwsem); |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2328 | INIT_LIST_HEAD(&cfg->ctx_err_recovery); |
| 2329 | INIT_LIST_HEAD(&cfg->lluns); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2330 | |
| 2331 | pci_set_drvdata(pdev, cfg); |
| 2332 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2333 | cfg->cxl_afu = cxl_pci_to_afu(pdev); |
| 2334 | |
| 2335 | rc = init_pci(cfg); |
| 2336 | if (rc) { |
| 2337 | dev_err(&pdev->dev, "%s: call to init_pci " |
| 2338 | "failed rc=%d!\n", __func__, rc); |
| 2339 | goto out_remove; |
| 2340 | } |
| 2341 | cfg->init_state = INIT_STATE_PCI; |
| 2342 | |
| 2343 | rc = init_afu(cfg); |
| 2344 | if (rc) { |
| 2345 | dev_err(&pdev->dev, "%s: call to init_afu " |
| 2346 | "failed rc=%d!\n", __func__, rc); |
| 2347 | goto out_remove; |
| 2348 | } |
| 2349 | cfg->init_state = INIT_STATE_AFU; |
| 2350 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2351 | rc = init_scsi(cfg); |
| 2352 | if (rc) { |
| 2353 | dev_err(&pdev->dev, "%s: call to init_scsi " |
| 2354 | "failed rc=%d!\n", __func__, rc); |
| 2355 | goto out_remove; |
| 2356 | } |
| 2357 | cfg->init_state = INIT_STATE_SCSI; |
| 2358 | |
| 2359 | out: |
| 2360 | pr_debug("%s: returning rc=%d\n", __func__, rc); |
| 2361 | return rc; |
| 2362 | |
| 2363 | out_remove: |
| 2364 | cxlflash_remove(pdev); |
| 2365 | goto out; |
| 2366 | } |
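
/*
 * cfg->init_state records how far probe progressed (NONE -> PCI -> AFU ->
 * SCSI).  On the out_remove path above, cxlflash_remove() uses that state
 * to unwind only the stages that actually completed, which is why each
 * stage sets init_state immediately after it succeeds.  A sketch of the
 * unwind (case bodies are illustrative):
 *
 *	switch (cfg->init_state) {
 *	case INIT_STATE_SCSI:
 *		...				// remove the SCSI host
 *	case INIT_STATE_AFU:			// fall through
 *		...				// stop and unmap the AFU
 *	case INIT_STATE_PCI:			// fall through
 *		...				// release PCI resources
 *	case INIT_STATE_NONE:			// fall through
 *	default:
 *		break;
 *	}
 */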
| 2367 | |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2368 | /** |
| 2369 | * cxlflash_pci_error_detected() - called when a PCI error is detected |
| 2370 | * @pdev: PCI device struct. |
| 2371 | * @state: PCI channel state. |
| 2372 | * |
Matthew R. Ochs | 1d3324c | 2016-09-02 15:39:30 -0500 | [diff] [blame] | 2373 | * When an EEH occurs during an active reset, wait until the reset is |
| 2374 | * complete and then take action based upon the device state. |
| 2375 | * |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2376 | * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT |
| 2377 | */ |
| 2378 | static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, |
| 2379 | pci_channel_state_t state) |
| 2380 | { |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2381 | int rc = 0; |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2382 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
| 2383 | struct device *dev = &cfg->dev->dev; |
| 2384 | |
| 2385 | dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); |
| 2386 | |
| 2387 | switch (state) { |
| 2388 | case pci_channel_io_frozen: |
Matthew R. Ochs | 1d3324c | 2016-09-02 15:39:30 -0500 | [diff] [blame] | 2389 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
| 2390 | if (cfg->state == STATE_FAILTERM) |
| 2391 | return PCI_ERS_RESULT_DISCONNECT; |
| 2392 | |
Matthew R. Ochs | 439e85c | 2015-10-21 15:12:00 -0500 | [diff] [blame] | 2393 | cfg->state = STATE_RESET; |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2394 | scsi_block_requests(cfg->host); |
Matthew R. Ochs | 0a27ae5 | 2015-10-21 15:11:52 -0500 | [diff] [blame] | 2395 | drain_ioctls(cfg); |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2396 | rc = cxlflash_mark_contexts_error(cfg); |
| 2397 | if (unlikely(rc)) |
| 2398 | dev_err(dev, "%s: Failed to mark user contexts! (%d)\n", |
| 2399 | __func__, rc); |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 2400 | term_afu(cfg); |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2401 | return PCI_ERS_RESULT_NEED_RESET; |
| 2402 | case pci_channel_io_perm_failure: |
| 2403 | cfg->state = STATE_FAILTERM; |
Matthew R. Ochs | 439e85c | 2015-10-21 15:12:00 -0500 | [diff] [blame] | 2404 | wake_up_all(&cfg->reset_waitq); |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2405 | scsi_unblock_requests(cfg->host); |
| 2406 | return PCI_ERS_RESULT_DISCONNECT; |
| 2407 | default: |
| 2408 | break; |
| 2409 | } |
| 2410 | return PCI_ERS_RESULT_NEED_RESET; |
| 2411 | } |
| 2412 | |
| 2413 | /** |
| 2414 | * cxlflash_pci_slot_reset() - called when PCI slot has been reset |
| 2415 | * @pdev: PCI device struct. |
| 2416 | * |
| 2417 | * This routine is called by the pci error recovery code after the PCI |
| 2418 | * slot has been reset, just before we should resume normal operations. |
| 2419 | * |
| 2420 | * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT |
| 2421 | */ |
| 2422 | static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) |
| 2423 | { |
| 2424 | int rc = 0; |
| 2425 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
| 2426 | struct device *dev = &cfg->dev->dev; |
| 2427 | |
| 2428 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); |
| 2429 | |
| 2430 | rc = init_afu(cfg); |
| 2431 | if (unlikely(rc)) { |
| 2432 | dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc); |
| 2433 | return PCI_ERS_RESULT_DISCONNECT; |
| 2434 | } |
| 2435 | |
| 2436 | return PCI_ERS_RESULT_RECOVERED; |
| 2437 | } |
| 2438 | |
| 2439 | /** |
| 2440 | * cxlflash_pci_resume() - called when normal operation can resume |
| 2441 | * @pdev: PCI device struct |
| 2442 | */ |
| 2443 | static void cxlflash_pci_resume(struct pci_dev *pdev) |
| 2444 | { |
| 2445 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
| 2446 | struct device *dev = &cfg->dev->dev; |
| 2447 | |
| 2448 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); |
| 2449 | |
| 2450 | cfg->state = STATE_NORMAL; |
Matthew R. Ochs | 439e85c | 2015-10-21 15:12:00 -0500 | [diff] [blame] | 2451 | wake_up_all(&cfg->reset_waitq); |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2452 | scsi_unblock_requests(cfg->host); |
| 2453 | } |
| 2454 | |
| 2455 | static const struct pci_error_handlers cxlflash_err_handler = { |
| 2456 | .error_detected = cxlflash_pci_error_detected, |
| 2457 | .slot_reset = cxlflash_pci_slot_reset, |
| 2458 | .resume = cxlflash_pci_resume, |
| 2459 | }; |
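
/*
 * EEH recovery sequence for this adapter, as implemented by the three
 * callbacks above (a summary, not additional code):
 *
 *   1. error_detected(frozen): wait out any in-flight reset, move to
 *      STATE_RESET, block new requests, drain ioctls, mark user contexts
 *      in error and tear down the AFU; request a slot reset.
 *   2. slot_reset: re-initialize the AFU on the freshly reset slot.
 *   3. resume: return to STATE_NORMAL, wake reset waiters and unblock
 *      requests.
 */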
| 2460 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2461 | /* |
| 2462 | * PCI device structure |
| 2463 | */ |
| 2464 | static struct pci_driver cxlflash_driver = { |
| 2465 | .name = CXLFLASH_NAME, |
| 2466 | .id_table = cxlflash_pci_table, |
| 2467 | .probe = cxlflash_probe, |
| 2468 | .remove = cxlflash_remove, |
Uma Krishnan | babf985 | 2016-09-02 15:39:16 -0500 | [diff] [blame] | 2469 | .shutdown = cxlflash_remove, |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2470 | .err_handler = &cxlflash_err_handler, |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2471 | }; |
| 2472 | |
| 2473 | /** |
| 2474 | * init_cxlflash() - module entry point |
| 2475 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 2476 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2477 | */ |
| 2478 | static int __init init_cxlflash(void) |
| 2479 | { |
Uma Krishnan | 8559921 | 2015-12-14 15:06:33 -0600 | [diff] [blame] | 2480 | pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2481 | |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2482 | cxlflash_list_init(); |
| 2483 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2484 | return pci_register_driver(&cxlflash_driver); |
| 2485 | } |
| 2486 | |
| 2487 | /** |
| 2488 | * exit_cxlflash() - module exit point |
| 2489 | */ |
| 2490 | static void __exit exit_cxlflash(void) |
| 2491 | { |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2492 | cxlflash_term_global_luns(); |
| 2493 | cxlflash_free_errpage(); |
| 2494 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2495 | pci_unregister_driver(&cxlflash_driver); |
| 2496 | } |
| 2497 | |
| 2498 | module_init(init_cxlflash); |
| 2499 | module_exit(exit_cxlflash); |