/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
        struct afu *afu;
        struct cxlflash_cfg *cfg;
        struct device *dev;
        struct sisl_ioarcb *ioarcb;
        struct sisl_ioasa *ioasa;
        u32 resid;

        if (unlikely(!cmd))
                return;

        afu = cmd->parent;
        cfg = afu->parent;
        dev = &cfg->dev->dev;
        ioarcb = &(cmd->rcb);
        ioasa = &(cmd->sa);

        if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
                resid = ioasa->resid;
                scsi_set_resid(scp, resid);
                dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
                        __func__, cmd, scp, resid);
        }

        if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
                dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
                        __func__, cmd, scp);
                scp->result = (DID_ERROR << 16);
        }

        dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
                "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
                ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
                ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 76 | |
| 77 | if (ioasa->rc.scsi_rc) { |
| 78 | /* We have a SCSI status */ |
| 79 | if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) { |
| 80 | memcpy(scp->sense_buffer, ioasa->sense_data, |
| 81 | SISL_SENSE_DATA_LEN); |
| 82 | scp->result = ioasa->rc.scsi_rc; |
| 83 | } else |
| 84 | scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16); |
| 85 | } |
| 86 | |
| 87 | /* |
| 88 | * We encountered an error. Set scp->result based on nature |
| 89 | * of error. |
| 90 | */ |
| 91 | if (ioasa->rc.fc_rc) { |
| 92 | /* We have an FC status */ |
| 93 | switch (ioasa->rc.fc_rc) { |
| 94 | case SISL_FC_RC_LINKDOWN: |
| 95 | scp->result = (DID_REQUEUE << 16); |
| 96 | break; |
| 97 | case SISL_FC_RC_RESID: |
| 98 | /* This indicates an FCP resid underrun */ |
| 99 | if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) { |
| 100 | /* If the SISL_RC_FLAGS_OVERRUN flag was set, |
| 101 | * then we will handle this error else where. |
| 102 | * If not then we must handle it here. |
Matthew R. Ochs | 8396012 | 2015-10-21 15:13:29 -0500 | [diff] [blame] | 103 | * This is probably an AFU bug. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 104 | */ |
| 105 | scp->result = (DID_ERROR << 16); |
| 106 | } |
| 107 | break; |
| 108 | case SISL_FC_RC_RESIDERR: |
| 109 | /* Resid mismatch between adapter and device */ |
| 110 | case SISL_FC_RC_TGTABORT: |
| 111 | case SISL_FC_RC_ABORTOK: |
| 112 | case SISL_FC_RC_ABORTFAIL: |
| 113 | case SISL_FC_RC_NOLOGI: |
| 114 | case SISL_FC_RC_ABORTPEND: |
| 115 | case SISL_FC_RC_WRABORTPEND: |
| 116 | case SISL_FC_RC_NOEXP: |
| 117 | case SISL_FC_RC_INUSE: |
| 118 | scp->result = (DID_ERROR << 16); |
| 119 | break; |
| 120 | } |
| 121 | } |

        if (ioasa->rc.afu_rc) {
                /* We have an AFU error */
                switch (ioasa->rc.afu_rc) {
                case SISL_AFU_RC_NO_CHANNELS:
                        scp->result = (DID_NO_CONNECT << 16);
                        break;
                case SISL_AFU_RC_DATA_DMA_ERR:
                        switch (ioasa->afu_extra) {
                        case SISL_AFU_DMA_ERR_PAGE_IN:
                                /* Retry */
                                scp->result = (DID_IMM_RETRY << 16);
                                break;
                        case SISL_AFU_DMA_ERR_INVALID_EA:
                        default:
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_AFU_RC_OUT_OF_DATA_BUFS:
                        /* Retry */
                        scp->result = (DID_ALLOC_FAILURE << 16);
                        break;
                default:
                        scp->result = (DID_ERROR << 16);
                }
        }
}

/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * Prepares and submits a command that has either completed or timed out to
 * the SCSI stack. Checks the AFU command back into the command pool for
 * non-internal (cmd->scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
        struct scsi_cmnd *scp;
        ulong lock_flags;
        struct afu *afu = cmd->parent;
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        bool cmd_is_tmf;

        if (cmd->scp) {
                scp = cmd->scp;
                if (unlikely(cmd->sa.ioasc))
                        process_cmd_err(cmd, scp);
                else
                        scp->result = (DID_OK << 16);

                cmd_is_tmf = cmd->cmd_tmf;

                dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
                                    __func__, scp, scp->result, cmd->sa.ioasc);

                scp->scsi_done(scp);

                if (cmd_is_tmf) {
                        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                        cfg->tmf_active = false;
                        wake_up_all_locked(&cfg->tmf_waitq);
                        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                }
        } else {
                complete(&cmd->cevent);
        }
}

/**
 * context_reset() - reset command owner context via specified register
 * @cmd: AFU command that timed out.
 * @reset_reg: MMIO register to perform reset.
 */
static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
{
        int nretry = 0;
        u64 rrin = 0x1;
        struct afu *afu = cmd->parent;
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;

        dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);

        writeq_be(rrin, reset_reg);
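        /*
         * The AFU acknowledges the reset request by clearing the 0x1 just
         * written; poll for that, backing off exponentially until the
         * retry budget is exhausted.
         */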
        do {
                rrin = readq_be(reset_reg);
                if (rrin != 0x1)
                        break;
                /* Double delay each time */
                udelay(1 << nretry);
        } while (nretry++ < MC_ROOM_RETRY_CNT);

        dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
                __func__, rrin, nretry);
}

/**
 * context_reset_ioarrin() - reset command owner context via IOARRIN register
 * @cmd: AFU command that timed out.
 */
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
        struct afu *afu = cmd->parent;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

        context_reset(cmd, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset command owner context w/ SQ Context Reset register
 * @cmd: AFU command that timed out.
 */
static void context_reset_sq(struct afu_cmd *cmd)
{
        struct afu *afu = cmd->parent;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

        context_reset(cmd, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *        0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        s64 room;
        ulong lock_flags;

        /*
         * To avoid the performance penalty of MMIO, spread the update of
         * 'room' over multiple commands.
         */
        spin_lock_irqsave(&hwq->rrin_slock, lock_flags);
        if (--hwq->room < 0) {
                room = readq_be(&hwq->host_map->cmd_room);
                if (room <= 0) {
                        dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
                                            "0x%02X, room=0x%016llX\n",
                                            __func__, cmd->rcb.cdb[0], room);
                        hwq->room = 0;
                        rc = SCSI_MLQUEUE_HOST_BUSY;
                        goto out;
                }
                hwq->room = room - 1;
        }
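
        /*
         * Submission is a single MMIO write of the RCB's address to
         * IOARRIN; the AFU then pulls the RCB contents from host memory
         * on its own (CAPI lets it use the context's effective addresses).
         */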
        writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
        spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags);
        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
                cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
        return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *        0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        int newval;
        ulong lock_flags;
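
        /*
         * Each SQ entry costs one credit; atomic_dec_if_positive() never
         * drives the count negative. With no credits left, bounce the
         * command back to the mid-layer for retry (credits are presumably
         * replenished as the AFU consumes entries from the queue).
         */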
        newval = atomic_dec_if_positive(&hwq->hsq_credits);
        if (newval <= 0) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
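
        /*
         * In SQ mode the AFU delivers completion status directly to the
         * IOASA referenced by the RCB, so point it at this command's
         * status area before queuing.
         */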
        cmd->rcb.ioasa = &cmd->sa;

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

        *hwq->hsq_curr = cmd->rcb;
        if (hwq->hsq_curr < hwq->hsq_end)
                hwq->hsq_curr++;
        else
                hwq->hsq_curr = hwq->hsq_start;
        writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
                "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
                cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
                readq_be(&hwq->host_map->sq_head),
                readq_be(&hwq->host_map->sq_tail));
        return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 *
 * Return:
 *        0 on success, -1 on timeout/error
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;
        ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
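
        /*
         * The wait allows double the command's own timeout (rcb.timeout is
         * expressed in seconds); on expiry, attempt to reset the owning
         * context.
         */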
        timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
        if (!timeout) {
                afu->context_reset(cmd);
                rc = -1;
        }

        if (unlikely(cmd->sa.ioasc != 0)) {
                dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
                        __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
                rc = -1;
        }

        return rc;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu: AFU to checkout from.
 * @scp: SCSI command from stack.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *        0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
        struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
        struct afu_cmd *cmd = sc_to_afucz(scp);
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        ulong lock_flags;
        int rc = 0;
        ulong to;

        /* When Task Management Function is active do not send another */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        cfg->tmf_active = true;
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        cmd->scp = scp;
        cmd->parent = afu;
        cmd->cmd_tmf = true;
        cmd->hwq_index = hwq->index;

        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
        cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
        cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
                              SISL_REQ_FLAGS_SUP_UNDERRUN |
                              SISL_REQ_FLAGS_TMF_CMD);
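        /* The TMF opcode itself rides in the CDB area of the RCB */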
        memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

        rc = afu->send_cmd(afu, cmd);
        if (unlikely(rc)) {
                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                cfg->tmf_active = false;
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                goto out;
        }

        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        to = msecs_to_jiffies(5000);
        to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
                                                       !cfg->tmf_active,
                                                       cfg->tmf_slock,
                                                       to);
        if (!to) {
                cfg->tmf_active = false;
                dev_err(dev, "%s: TMF timed out\n", __func__);
                rc = -1;
        }
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
        return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
        return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
        struct cxlflash_cfg *cfg = shost_priv(host);
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct afu_cmd *cmd = sc_to_afucz(scp);
        struct scatterlist *sg = scsi_sglist(scp);
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
        ulong lock_flags;
        int rc = 0;

        dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
                            "cdb=(%08x-%08x-%08x-%08x)\n",
                            __func__, scp, host->host_no, scp->device->channel,
                            scp->device->id, scp->device->lun,
                            get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

        /*
         * If a Task Management Function is active, wait for it to complete
         * before continuing with regular commands.
         */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active) {
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        switch (cfg->state) {
        case STATE_PROBING:
        case STATE_PROBED:
        case STATE_RESET:
                dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        case STATE_FAILTERM:
                dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
                scp->result = (DID_NO_CONNECT << 16);
                scp->scsi_done(scp);
                rc = 0;
                goto out;
        default:
                break;
        }

        if (likely(sg)) {
                cmd->rcb.data_len = sg->length;
                cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
        }
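        /*
         * Only the first scatter/gather element is referenced above; this
         * presumes the host template limits commands to a single S/G entry
         * so the mid-layer never hands down more than one segment.
         */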

        cmd->scp = scp;
        cmd->parent = afu;
        cmd->hwq_index = hwq->index;

        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
        cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

        cmd->rcb.req_flags = req_flags;
        memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

        rc = afu->send_cmd(afu, cmd);
out:
        return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;

        if (pci_channel_offline(pdev))
                wait_event_timeout(cfg->reset_waitq,
                                   !pci_channel_offline(pdev),
                                   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;

        if (cfg->afu) {
                free_pages((ulong)afu, get_order(sizeof(struct afu)));
                cfg->afu = NULL;
        }
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;
        struct hwq *hwq;
        int i;

        cancel_work_sync(&cfg->work_q);

        if (likely(afu)) {
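                /* Let active internal AFU commands drain before teardown */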
                while (atomic_read(&afu->cmds_active))
                        ssleep(1);

                if (afu_is_irqpoll_enabled(afu)) {
                        for (i = 0; i < afu->num_hwqs; i++) {
                                hwq = get_hwq(afu, i);

                                irq_poll_disable(&hwq->irqpoll);
                        }
                }

                if (likely(afu->afu_map)) {
                        cxl_psa_unmap((void __iomem *)afu->afu_map);
                        afu->afu_map = NULL;
                }
        }
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 * @index: Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
                      u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        switch (level) {
        case UNMAP_THREE:
                /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
                if (index == PRIMARY_HWQ)
                        cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
                /* fall through */
        case UNMAP_TWO:
                cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
                /* fall through */
        case UNMAP_ONE:
                cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
                /* fall through */
        case FREE_IRQ:
                cxl_free_afu_irqs(hwq->ctx);
                /* fall through */
        case UNDO_NOOP:
                /* No action required */
                break;
        }
}

/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 * @index: Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        WARN_ON(cxl_stop_context(hwq->ctx));
        if (index != PRIMARY_HWQ)
                WARN_ON(cxl_release_context(hwq->ctx));
        hwq->ctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int k;

        /*
         * Tear down is carefully orchestrated to ensure
         * no interrupts can come in when the problem state
         * area is unmapped.
         *
         * 1) Disable all AFU interrupts for each master
         * 2) Unmap the problem state area
         * 3) Stop each master context
         */
        if (cfg->afu) {
                for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                        term_intr(cfg, UNMAP_THREE, k);

                stop_afu(cfg);

                for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                        term_mc(cfg, k);
        }

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg: Internal structure associated with the host.
 * @wait: Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shut down
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct dev_dependent_vals *ddv;
        __be64 __iomem *fc_port_regs;
        u64 reg, status;
        int i, retry_cnt = 0;

        ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
        if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
                return;

        if (!afu || !afu->afu_map) {
                dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
                return;
        }

        /* Notify AFU */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);

                reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
                reg |= SISL_FC_SHUTDOWN_NORMAL;
                writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
        }

        if (!wait)
                return;

        /* Wait up to 1.5 seconds for shutdown processing to complete */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);
                retry_cnt = 0;

                while (true) {
                        status = readq_be(&fc_port_regs[FC_STATUS / 8]);
                        if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
                                break;
                        if (++retry_cnt >= MC_RETRY_CNT) {
                                dev_dbg(dev, "%s: port %d shutdown processing "
                                        "not yet completed\n", __func__, i);
                                break;
                        }
                        msleep(100 * retry_cnt);
                }
        }
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        ulong lock_flags;

        if (!pci_is_enabled(pdev)) {
                dev_dbg(dev, "%s: Device is disabled\n", __func__);
                return;
        }

        /*
         * If a Task Management Function is active, wait for it to complete
         * before continuing with remove.
         */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        /* Notify AFU and wait for shutdown processing to complete */
        notify_shutdown(cfg, true);

        cfg->state = STATE_FAILTERM;
        cxlflash_stop_term_user_contexts(cfg);

        /* The init states intentionally cascade to unwind in reverse order */
        switch (cfg->init_state) {
        case INIT_STATE_SCSI:
                cxlflash_term_local_luns(cfg);
                scsi_remove_host(cfg->host);
                /* fall through */
        case INIT_STATE_AFU:
                term_afu(cfg);
                /* fall through */
        case INIT_STATE_PCI:
                pci_disable_device(pdev);
                /* fall through */
        case INIT_STATE_NONE:
                free_mem(cfg);
                scsi_host_put(cfg->host);
                break;
        }

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *        0 on success
 *        -ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
        int rc = 0;
        struct device *dev = &cfg->dev->dev;

        /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
        cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                            get_order(sizeof(struct afu)));
        if (unlikely(!cfg->afu)) {
                dev_err(dev, "%s: cannot get %d free pages\n",
                        __func__, get_order(sizeof(struct afu)));
                rc = -ENOMEM;
                goto out;
        }
        cfg->afu->parent = cfg;
        cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
        cfg->afu->afu_map = NULL;
out:
        return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = pci_enable_device(pdev);
        if (rc || pci_channel_offline(pdev)) {
                if (pci_channel_offline(pdev)) {
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        rc = pci_enable_device(pdev);
                }

                if (rc) {
                        dev_err(dev, "%s: Cannot enable adapter\n", __func__);
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        goto out;
                }
        }

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = scsi_add_host(cfg->host, &pdev->dev);
        if (rc) {
                dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
                goto out;
        }

        scsi_scan_host(cfg->host);

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}
| 887 | |
| 888 | /** |
| 889 | * set_port_online() - transitions the specified host FC port to online state |
| 890 | * @fc_regs: Top of MMIO region defined for specified port. |
| 891 | * |
| 892 | * The provided MMIO region must be mapped prior to call. Online state means |
| 893 | * that the FC link layer has synced, completed the handshaking process, and |
| 894 | * is ready for login to start. |
| 895 | */ |
Matthew R. Ochs | 1786f4a | 2015-10-21 15:14:48 -0500 | [diff] [blame] | 896 | static void set_port_online(__be64 __iomem *fc_regs) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 897 | { |
| 898 | u64 cmdcfg; |
| 899 | |
| 900 | cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]); |
| 901 | cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */ |
| 902 | cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */ |
| 903 | writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]); |
| 904 | } |
| 905 | |
| 906 | /** |
| 907 | * set_port_offline() - transitions the specified host FC port to offline state |
| 908 | * @fc_regs: Top of MMIO region defined for specified port. |
| 909 | * |
| 910 | * The provided MMIO region must be mapped prior to call. |
| 911 | */ |
Matthew R. Ochs | 1786f4a | 2015-10-21 15:14:48 -0500 | [diff] [blame] | 912 | static void set_port_offline(__be64 __iomem *fc_regs) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 913 | { |
| 914 | u64 cmdcfg; |
| 915 | |
| 916 | cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]); |
| 917 | cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */ |
| 918 | cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */ |
| 919 | writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]); |
| 920 | } |
| 921 | |
| 922 | /** |
| 923 | * wait_port_online() - waits for the specified host FC port to come online |
| 924 | * @fc_regs: Top of MMIO region defined for specified port. |
| 925 | * @delay_us: Number of microseconds to delay between reading port status. |
| 926 | * @nretry: Number of cycles to retry reading port status. |
| 927 | * |
| 928 | * The provided MMIO region must be mapped prior to call. This will timeout |
| 929 | * when the cable is not plugged in. |
| 930 | * |
| 931 | * Return: |
| 932 | * TRUE (1) when the specified port is online |
| 933 | * FALSE (0) when the specified port fails to come online after timeout |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 934 | */ |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 935 | static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 936 | { |
| 937 | u64 status; |
| 938 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 939 | WARN_ON(delay_us < 1000); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 940 | |
| 941 | do { |
| 942 | msleep(delay_us / 1000); |
| 943 | status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); |
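| | /* An all-Fs read likely means the MMIO space is inaccessible |
| | * (e.g. during error recovery); halve the remaining retries so |
| | * the wait concludes sooner. |
| | */ |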
Matthew R. Ochs | 05dab43 | 2016-09-02 15:40:03 -0500 | [diff] [blame] | 944 | if (status == U64_MAX) |
| 945 | nretry /= 2; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 946 | } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE && |
| 947 | nretry--); |
| 948 | |
| 949 | return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE); |
| 950 | } |
| 951 | |
| 952 | /** |
| 953 | * wait_port_offline() - waits for the specified host FC port to go offline |
| 954 | * @fc_regs: Top of MMIO region defined for specified port. |
| 955 | * @delay_us: Number of microseconds to delay between reading port status. |
| 956 | * @nretry: Number of cycles to retry reading port status. |
| 957 | * |
| 958 | * The provided MMIO region must be mapped prior to call. |
| 959 | * |
| 960 | * Return: |
| 961 | * TRUE (1) when the specified port is offline |
| 962 | * FALSE (0) when the specified port fails to go offline after timeout |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 963 | */ |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 964 | static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 965 | { |
| 966 | u64 status; |
| 967 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 968 | WARN_ON(delay_us < 1000); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 969 | |
| 970 | do { |
| 971 | msleep(delay_us / 1000); |
| 972 | status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); |
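| | /* As with wait_port_online(), an all-Fs status suggests the MMIO |
| | * space is inaccessible; halve the remaining retries. |
| | */ |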
Matthew R. Ochs | 05dab43 | 2016-09-02 15:40:03 -0500 | [diff] [blame] | 973 | if (status == U64_MAX) |
| 974 | nretry /= 2; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 975 | } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE && |
| 976 | nretry--); |
| 977 | |
| 978 | return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE); |
| 979 | } |
| 980 | |
| 981 | /** |
| 982 | * afu_set_wwpn() - configures the WWPN for the specified host FC port |
| 983 | * @afu: AFU associated with the host that owns the specified FC port. |
| 984 | * @port: Port number being configured. |
| 985 | * @fc_regs: Top of MMIO region defined for specified port. |
| 986 | * @wwpn: The world-wide-port-number previously discovered for port. |
| 987 | * |
| 988 | * The provided MMIO region must be mapped prior to call. As part of the |
| 989 | * sequence to configure the WWPN, the port is toggled offline and then back |
| 990 | * online. This toggling action can cause this routine to delay up to a few |
| 991 | * seconds. When configured to use the internal LUN feature of the AFU, a |
| 992 | * failure to come online is overridden. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 993 | */ |
Matthew R. Ochs | f801326 | 2016-09-02 15:40:20 -0500 | [diff] [blame] | 994 | static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs, |
| 995 | u64 wwpn) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 996 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 997 | struct cxlflash_cfg *cfg = afu->parent; |
| 998 | struct device *dev = &cfg->dev->dev; |
| 999 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1000 | set_port_offline(fc_regs); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1001 | if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, |
| 1002 | FC_PORT_STATUS_RETRY_CNT)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1003 | dev_dbg(dev, "%s: wait on port %d to go offline timed out\n", |
| 1004 | __func__, port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1005 | } |
| 1006 | |
Matthew R. Ochs | f801326 | 2016-09-02 15:40:20 -0500 | [diff] [blame] | 1007 | writeq_be(wwpn, &fc_regs[FC_PNAME / 8]); |
Matthew R. Ochs | 964497b | 2015-10-21 15:13:54 -0500 | [diff] [blame] | 1008 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1009 | set_port_online(fc_regs); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1010 | if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, |
| 1011 | FC_PORT_STATUS_RETRY_CNT)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1012 | dev_dbg(dev, "%s: wait on port %d to go online timed out\n", |
| 1013 | __func__, port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1014 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1015 | } |
| 1016 | |
| 1017 | /** |
| 1018 | * afu_link_reset() - resets the specified host FC port |
| 1019 | * @afu: AFU associated with the host that owns the specified FC port. |
| 1020 | * @port: Port number being configured. |
| 1021 | * @fc_regs: Top of MMIO region defined for specified port. |
| 1022 | * |
| 1023 | * The provided MMIO region must be mapped prior to call. The sequence to |
| 1024 | * reset the port involves toggling it offline and then back online. This |
| 1025 | * action can cause this routine to delay up to a few seconds. An effort |
| 1026 | * is made to maintain the link with the device by switching the host to |
| 1027 | * use the alternate port exclusively while the reset takes place. A |
| 1028 | * failure to come back online is logged but otherwise ignored. |
| 1029 | */ |
Matthew R. Ochs | 1786f4a | 2015-10-21 15:14:48 -0500 | [diff] [blame] | 1030 | static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1031 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1032 | struct cxlflash_cfg *cfg = afu->parent; |
| 1033 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1034 | u64 port_sel; |
| 1035 | |
| 1036 | /* first switch the AFU to the other links, if any */ |
| 1037 | port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel); |
Dan Carpenter | 4da74db | 2015-08-18 11:57:43 +0300 | [diff] [blame] | 1038 | port_sel &= ~(1ULL << port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1039 | writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); |
| 1040 | cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); |
| 1041 | |
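| | /* With traffic routed to the remaining ports, bounce this link */ |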
| 1042 | set_port_offline(fc_regs); |
| 1043 | if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, |
| 1044 | FC_PORT_STATUS_RETRY_CNT)) |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1045 | dev_err(dev, "%s: wait on port %d to go offline timed out\n", |
| 1046 | __func__, port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1047 | |
| 1048 | set_port_online(fc_regs); |
| 1049 | if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, |
| 1050 | FC_PORT_STATUS_RETRY_CNT)) |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1051 | dev_err(dev, "%s: wait on port %d to go online timed out\n", |
| 1052 | __func__, port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1053 | |
| 1054 | /* switch back to include this port */ |
Dan Carpenter | 4da74db | 2015-08-18 11:57:43 +0300 | [diff] [blame] | 1055 | port_sel |= (1ULL << port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1056 | writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); |
| 1057 | cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); |
| 1058 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1059 | dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1060 | } |
| 1061 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1062 | /** |
| 1063 | * afu_err_intr_init() - clears and initializes the AFU for error interrupts |
| 1064 | * @afu: AFU associated with the host. |
| 1065 | */ |
| 1066 | static void afu_err_intr_init(struct afu *afu) |
| 1067 | { |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 1068 | struct cxlflash_cfg *cfg = afu->parent; |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1069 | __be64 __iomem *fc_port_regs; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1070 | int i; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1071 | struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1072 | u64 reg; |
| 1073 | |
| 1074 | /* Global async interrupts: the AFU clears afu_ctrl on context exit |
| 1075 | * if async interrupts were sent to that context. This prevents the |
| 1076 | * AFU from sending further async interrupts when there is nobody to |
| 1077 | * receive them. |
| 1078 | */ |
| 1080 | |
| 1081 | /* mask all */ |
| 1082 | writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask); |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1083 | /* set LISN# to send and point to primary master context */ |
| 1084 | reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1085 | |
| 1086 | if (afu->internal_lun) |
| 1087 | reg |= 1; /* Bit 63 indicates local lun */ |
| 1088 | writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl); |
| 1089 | /* clear all */ |
| 1090 | writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); |
| 1091 | /* unmask bits that are of interest */ |
| 1092 | /* note: afu can send an interrupt after this step */ |
| 1093 | writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask); |
| 1094 | /* clear again in case a bit came on after previous clear but before */ |
| 1095 | /* unmask */ |
| 1096 | writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); |
| 1097 | |
| 1098 | /* Clear/Set internal lun bits */ |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1099 | fc_port_regs = get_fc_port_regs(cfg, 0); |
| 1100 | reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1101 | reg &= SISL_FC_INTERNAL_MASK; |
| 1102 | if (afu->internal_lun) |
| 1103 | reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT); |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1104 | writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1105 | |
| 1106 | /* now clear FC errors */ |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 1107 | for (i = 0; i < cfg->num_fc_ports; i++) { |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1108 | fc_port_regs = get_fc_port_regs(cfg, i); |
| 1109 | |
| 1110 | writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]); |
| 1111 | writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1112 | } |
| 1113 | |
| 1114 | /* sync interrupts for master's IOARRIN write */ |
| 1115 | /* note that unlike asyncs, there can be no pending sync interrupts */ |
| 1116 | /* at this time (this is a fresh context and master has not written */ |
| 1117 | /* IOARRIN yet), so there is nothing to clear. */ |
| 1118 | |
| 1119 | /* set LISN#, it is always sent to the context that wrote IOARRIN */ |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 1120 | for (i = 0; i < afu->num_hwqs; i++) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1121 | hwq = get_hwq(afu, i); |
| 1122 | |
| 1123 | writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl); |
| 1124 | writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask); |
| 1125 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1126 | } |
| 1127 | |
| 1128 | /** |
| 1129 | * cxlflash_sync_err_irq() - interrupt handler for synchronous errors |
| 1130 | * @irq: Interrupt number. |
| 1131 | * @data: Private data provided at interrupt registration, the hardware queue. |
| 1132 | * |
| 1133 | * Return: Always return IRQ_HANDLED. |
| 1134 | */ |
| 1135 | static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) |
| 1136 | { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1137 | struct hwq *hwq = (struct hwq *)data; |
| 1138 | struct cxlflash_cfg *cfg = hwq->afu->parent; |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1139 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1140 | u64 reg; |
| 1141 | u64 reg_unmasked; |
| 1142 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1143 | reg = readq_be(&hwq->host_map->intr_status); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1144 | reg_unmasked = (reg & SISL_ISTATUS_UNMASK); |
| 1145 | |
| 1146 | if (reg_unmasked == 0UL) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1147 | dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n", |
| 1148 | __func__, reg); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1149 | goto cxlflash_sync_err_irq_exit; |
| 1150 | } |
| 1151 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1152 | dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n", |
| 1153 | __func__, reg); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1154 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1155 | writeq_be(reg_unmasked, &hwq->host_map->intr_clear); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1156 | |
| 1157 | cxlflash_sync_err_irq_exit: |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1158 | return IRQ_HANDLED; |
| 1159 | } |
| 1160 | |
| 1161 | /** |
Matthew R. Ochs | 76a6ebb | 2017-04-12 14:11:44 -0500 | [diff] [blame] | 1162 | * process_hrrq() - process the read-response queue |
| 1163 | * @hwq: Hardware queue associated with the host. |
Matthew R. Ochs | f918b4a | 2017-04-12 14:12:55 -0500 | [diff] [blame] | 1164 | * @doneq: Queue of commands harvested from the RRQ. |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1165 | * @budget: Threshold of RRQ entries to process. |
Matthew R. Ochs | f918b4a | 2017-04-12 14:12:55 -0500 | [diff] [blame] | 1166 | * |
| 1167 | * This routine must be called with the RRQ spin lock held and interrupts disabled. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1168 | * |
Matthew R. Ochs | 76a6ebb | 2017-04-12 14:11:44 -0500 | [diff] [blame] | 1169 | * Return: The number of entries processed. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1170 | */ |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1171 | static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1172 | { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1173 | struct afu *afu = hwq->afu; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1174 | struct afu_cmd *cmd; |
Matthew R. Ochs | 696d0b0 | 2017-01-11 19:19:33 -0600 | [diff] [blame] | 1175 | struct sisl_ioasa *ioasa; |
| 1176 | struct sisl_ioarcb *ioarcb; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1177 | bool toggle = hwq->toggle; |
Matthew R. Ochs | 76a6ebb | 2017-04-12 14:11:44 -0500 | [diff] [blame] | 1178 | int num_hrrq = 0; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1179 | u64 entry, |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1180 | *hrrq_start = hwq->hrrq_start, |
| 1181 | *hrrq_end = hwq->hrrq_end, |
| 1182 | *hrrq_curr = hwq->hrrq_curr; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1183 | |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1184 | /* Process ready RRQ entries up to the specified budget (if any) */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1185 | while (true) { |
| 1186 | entry = *hrrq_curr; |
| 1187 | |
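| | /* The toggle bit flips each time the queue wraps; an entry is |
| | * valid for the current pass only when its toggle bit matches. |
| | */ |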
| 1188 | if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle) |
| 1189 | break; |
| 1190 | |
Matthew R. Ochs | 696d0b0 | 2017-01-11 19:19:33 -0600 | [diff] [blame] | 1191 | entry &= ~SISL_RESP_HANDLE_T_BIT; |
| 1192 | |
| 1193 | if (afu_is_sq_cmd_mode(afu)) { |
| 1194 | ioasa = (struct sisl_ioasa *)entry; |
| 1195 | cmd = container_of(ioasa, struct afu_cmd, sa); |
| 1196 | } else { |
| 1197 | ioarcb = (struct sisl_ioarcb *)entry; |
| 1198 | cmd = container_of(ioarcb, struct afu_cmd, rcb); |
| 1199 | } |
| 1200 | |
Matthew R. Ochs | f918b4a | 2017-04-12 14:12:55 -0500 | [diff] [blame] | 1201 | list_add_tail(&cmd->queue, doneq); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1202 | |
| 1203 | /* Advance to next entry or wrap and flip the toggle bit */ |
| 1204 | if (hrrq_curr < hrrq_end) |
| 1205 | hrrq_curr++; |
| 1206 | else { |
| 1207 | hrrq_curr = hrrq_start; |
| 1208 | toggle ^= SISL_RESP_HANDLE_T_BIT; |
| 1209 | } |
Matthew R. Ochs | 696d0b0 | 2017-01-11 19:19:33 -0600 | [diff] [blame] | 1210 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1211 | atomic_inc(&hwq->hsq_credits); |
Matthew R. Ochs | 76a6ebb | 2017-04-12 14:11:44 -0500 | [diff] [blame] | 1212 | num_hrrq++; |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1213 | |
| 1214 | if (budget > 0 && num_hrrq >= budget) |
| 1215 | break; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1216 | } |
| 1217 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1218 | hwq->hrrq_curr = hrrq_curr; |
| 1219 | hwq->toggle = toggle; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1220 | |
Matthew R. Ochs | 76a6ebb | 2017-04-12 14:11:44 -0500 | [diff] [blame] | 1221 | return num_hrrq; |
| 1222 | } |
| 1223 | |
| 1224 | /** |
Matthew R. Ochs | f918b4a | 2017-04-12 14:12:55 -0500 | [diff] [blame] | 1225 | * process_cmd_doneq() - process a queue of harvested RRQ commands |
| 1226 | * @doneq: Queue of completed commands. |
| 1227 | * |
| 1228 | * Note that upon return the queue can no longer be trusted. |
| 1229 | */ |
| 1230 | static void process_cmd_doneq(struct list_head *doneq) |
| 1231 | { |
| 1232 | struct afu_cmd *cmd, *tmp; |
| 1233 | |
| 1234 | WARN_ON(list_empty(doneq)); |
| 1235 | |
| 1236 | list_for_each_entry_safe(cmd, tmp, doneq, queue) |
| 1237 | cmd_complete(cmd); |
| 1238 | } |
| 1239 | |
| 1240 | /** |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1241 | * cxlflash_irqpoll() - process a queue of harvested RRQ commands |
| 1242 | * @irqpoll: IRQ poll structure associated with queue to poll. |
| 1243 | * @budget: Threshold of RRQ entries to process per poll. |
| 1244 | * |
| 1245 | * Return: The number of entries processed. |
| 1246 | */ |
| 1247 | static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget) |
| 1248 | { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1249 | struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll); |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1250 | unsigned long hrrq_flags; |
| 1251 | LIST_HEAD(doneq); |
| 1252 | int num_entries = 0; |
| 1253 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1254 | spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1255 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1256 | num_entries = process_hrrq(hwq, &doneq, budget); |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1257 | if (num_entries < budget) |
| 1258 | irq_poll_complete(irqpoll); |
| 1259 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1260 | spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1261 | |
| 1262 | process_cmd_doneq(&doneq); |
| 1263 | return num_entries; |
| 1264 | } |
| 1265 | |
| 1266 | /** |
Matthew R. Ochs | 76a6ebb | 2017-04-12 14:11:44 -0500 | [diff] [blame] | 1267 | * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) |
| 1268 | * @irq: Interrupt number. |
| 1269 | * @data: Private data provided at interrupt registration, the hardware queue. |
| 1270 | * |
Matthew R. Ochs | f918b4a | 2017-04-12 14:12:55 -0500 | [diff] [blame] | 1271 | * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found. |
Matthew R. Ochs | 76a6ebb | 2017-04-12 14:11:44 -0500 | [diff] [blame] | 1272 | */ |
| 1273 | static irqreturn_t cxlflash_rrq_irq(int irq, void *data) |
| 1274 | { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1275 | struct hwq *hwq = (struct hwq *)data; |
| 1276 | struct afu *afu = hwq->afu; |
Matthew R. Ochs | f918b4a | 2017-04-12 14:12:55 -0500 | [diff] [blame] | 1277 | unsigned long hrrq_flags; |
| 1278 | LIST_HEAD(doneq); |
| 1279 | int num_entries = 0; |
Matthew R. Ochs | 76a6ebb | 2017-04-12 14:11:44 -0500 | [diff] [blame] | 1280 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1281 | spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1282 | |
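| | /* When irqpoll is enabled, defer RRQ harvesting to the poll handler */ |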
| 1283 | if (afu_is_irqpoll_enabled(afu)) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1284 | irq_poll_sched(&hwq->irqpoll); |
| 1285 | spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 1286 | return IRQ_HANDLED; |
| 1287 | } |
| 1288 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1289 | num_entries = process_hrrq(hwq, &doneq, -1); |
| 1290 | spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); |
Matthew R. Ochs | f918b4a | 2017-04-12 14:12:55 -0500 | [diff] [blame] | 1291 | |
| 1292 | if (num_entries == 0) |
| 1293 | return IRQ_NONE; |
| 1294 | |
| 1295 | process_cmd_doneq(&doneq); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1296 | return IRQ_HANDLED; |
| 1297 | } |
| 1298 | |
Matthew R. Ochs | e2ef33f | 2017-04-12 14:15:29 -0500 | [diff] [blame] | 1299 | /* |
| 1300 | * Asynchronous interrupt information table |
| 1301 | * |
| 1302 | * NOTE: |
| 1303 | * - Order matters here as this array is indexed by bit position. |
| 1304 | * |
| 1305 | * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro |
| 1306 | * as complex and complains due to a lack of parentheses/braces. |
| 1307 | */ |
| 1308 | #define ASTATUS_FC(_a, _b, _c, _d) \ |
| 1309 | { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) } |
| 1310 | |
| 1311 | #define BUILD_SISL_ASTATUS_FC_PORT(_a) \ |
| 1312 | ASTATUS_FC(_a, LINK_UP, "link up", 0), \ |
| 1313 | ASTATUS_FC(_a, LINK_DN, "link down", 0), \ |
| 1314 | ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \ |
| 1315 | ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \ |
| 1316 | ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \ |
| 1317 | ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \ |
| 1318 | ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \ |
| 1319 | ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET) |
| 1320 | |
| 1321 | static const struct asyc_intr_info ainfo[] = { |
| 1322 | BUILD_SISL_ASTATUS_FC_PORT(1), |
| 1323 | BUILD_SISL_ASTATUS_FC_PORT(0), |
| 1324 | BUILD_SISL_ASTATUS_FC_PORT(3), |
| 1325 | BUILD_SISL_ASTATUS_FC_PORT(2) |
| 1326 | }; |
| 1327 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1328 | /** |
| 1329 | * cxlflash_async_err_irq() - interrupt handler for asynchronous errors |
| 1330 | * @irq: Interrupt number. |
| 1331 | * @data: Private data provided at interrupt registration, the hardware queue. |
| 1332 | * |
| 1333 | * Return: Always return IRQ_HANDLED. |
| 1334 | */ |
| 1335 | static irqreturn_t cxlflash_async_err_irq(int irq, void *data) |
| 1336 | { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1337 | struct hwq *hwq = (struct hwq *)data; |
| 1338 | struct afu *afu = hwq->afu; |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1339 | struct cxlflash_cfg *cfg = afu->parent; |
| 1340 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1341 | const struct asyc_intr_info *info; |
Matthew R. Ochs | 1786f4a | 2015-10-21 15:14:48 -0500 | [diff] [blame] | 1342 | struct sisl_global_map __iomem *global = &afu->afu_map->global; |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1343 | __be64 __iomem *fc_port_regs; |
Matthew R. Ochs | e2ef33f | 2017-04-12 14:15:29 -0500 | [diff] [blame] | 1344 | u64 reg_unmasked; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1345 | u64 reg; |
Matthew R. Ochs | e2ef33f | 2017-04-12 14:15:29 -0500 | [diff] [blame] | 1346 | u64 bit; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1347 | u8 port; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1348 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1349 | reg = readq_be(&global->regs.aintr_status); |
| 1350 | reg_unmasked = (reg & SISL_ASTATUS_UNMASK); |
| 1351 | |
Matthew R. Ochs | e2ef33f | 2017-04-12 14:15:29 -0500 | [diff] [blame] | 1352 | if (unlikely(reg_unmasked == 0)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1353 | dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n", |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1354 | __func__, reg); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1355 | goto out; |
| 1356 | } |
| 1357 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1358 | /* FYI, it is 'okay' to clear AFU status before FC_ERROR */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1359 | writeq_be(reg_unmasked, &global->regs.aintr_clear); |
| 1360 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1361 | /* Check each bit that is on */ |
Matthew R. Ochs | e2ef33f | 2017-04-12 14:15:29 -0500 | [diff] [blame] | 1362 | for_each_set_bit(bit, (ulong *)®_unmasked, BITS_PER_LONG) { |
| 1363 | if (unlikely(bit >= ARRAY_SIZE(ainfo))) { |
| 1364 | WARN_ON_ONCE(1); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1365 | continue; |
Matthew R. Ochs | e2ef33f | 2017-04-12 14:15:29 -0500 | [diff] [blame] | 1366 | } |
| 1367 | |
| 1368 | info = &ainfo[bit]; |
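| | /* Sanity check that the table is ordered by bit position */ |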
| 1369 | if (unlikely(info->status != 1ULL << bit)) { |
| 1370 | WARN_ON_ONCE(1); |
| 1371 | continue; |
| 1372 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1373 | |
| 1374 | port = info->port; |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1375 | fc_port_regs = get_fc_port_regs(cfg, port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1376 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1377 | dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n", |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1378 | __func__, port, info->desc, |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1379 | readq_be(&fc_port_regs[FC_STATUS / 8])); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1380 | |
| 1381 | /* |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1382 | * Do link reset first, some OTHER errors will set FC_ERROR |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1383 | * again if cleared before or w/o a reset |
| 1384 | */ |
| 1385 | if (info->action & LINK_RESET) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1386 | dev_err(dev, "%s: FC Port %d: resetting link\n", |
| 1387 | __func__, port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1388 | cfg->lr_state = LINK_RESET_REQUIRED; |
| 1389 | cfg->lr_port = port; |
| 1390 | schedule_work(&cfg->work_q); |
| 1391 | } |
| 1392 | |
| 1393 | if (info->action & CLR_FC_ERROR) { |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1394 | reg = readq_be(&fc_port_regs[FC_ERROR / 8]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1395 | |
| 1396 | /* |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1397 | * Since all errors are unmasked, FC_ERROR and FC_ERRCAP |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1398 | * should be the same and tracing one is sufficient. |
| 1399 | */ |
| 1400 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1401 | dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n", |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1402 | __func__, port, reg); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1403 | |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1404 | writeq_be(reg, &fc_port_regs[FC_ERROR / 8]); |
| 1405 | writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1406 | } |
Matthew R. Ochs | ef51074 | 2015-10-21 15:13:37 -0500 | [diff] [blame] | 1407 | |
| 1408 | if (info->action & SCAN_HOST) { |
| 1409 | atomic_inc(&cfg->scan_host_needed); |
| 1410 | schedule_work(&cfg->work_q); |
| 1411 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1412 | } |
| 1413 | |
| 1414 | out: |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1415 | return IRQ_HANDLED; |
| 1416 | } |
| 1417 | |
| 1418 | /** |
| 1419 | * start_context() - starts the master context |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1420 | * @cfg: Internal structure associated with the host. |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1421 | * @index: Index of the hardware queue. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1422 | * |
| 1423 | * Return: A success or failure value from CXL services. |
| 1424 | */ |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1425 | static int start_context(struct cxlflash_cfg *cfg, u32 index) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1426 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1427 | struct device *dev = &cfg->dev->dev; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1428 | struct hwq *hwq = get_hwq(cfg->afu, index); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1429 | int rc = 0; |
| 1430 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1431 | rc = cxl_start_context(hwq->ctx, |
| 1432 | hwq->work.work_element_descriptor, |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1433 | NULL); |
| 1434 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1435 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1436 | return rc; |
| 1437 | } |
| 1438 | |
| 1439 | /** |
| 1440 | * read_vpd() - obtains the WWPNs from VPD |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1441 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 1442 | * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1443 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1444 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1445 | */ |
| 1446 | static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) |
| 1447 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1448 | struct device *dev = &cfg->dev->dev; |
| 1449 | struct pci_dev *pdev = cfg->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1450 | int rc = 0; |
| 1451 | int ro_start, ro_size, i, j, k; |
| 1452 | ssize_t vpd_size; |
| 1453 | char vpd_data[CXLFLASH_VPD_LEN]; |
| 1454 | char tmp_buf[WWPN_BUF_LEN] = { 0 }; |
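| | /* VPD keywords V5 through V8 carry the WWPNs for FC ports 0-3 */ |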
Matthew R. Ochs | 1cd7fab | 2017-04-12 14:14:41 -0500 | [diff] [blame] | 1455 | char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1456 | |
| 1457 | /* Get the VPD data from the device */ |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1458 | vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1459 | if (unlikely(vpd_size <= 0)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1460 | dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", |
| 1461 | __func__, vpd_size); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1462 | rc = -ENODEV; |
| 1463 | goto out; |
| 1464 | } |
| 1465 | |
| 1466 | /* Get the read only section offset */ |
| 1467 | ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, |
| 1468 | PCI_VPD_LRDT_RO_DATA); |
| 1469 | if (unlikely(ro_start < 0)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1470 | dev_err(dev, "%s: VPD Read-only data not found\n", __func__); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1471 | rc = -ENODEV; |
| 1472 | goto out; |
| 1473 | } |
| 1474 | |
| 1475 | /* Get the read only section size, cap when extends beyond read VPD */ |
| 1476 | ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); |
| 1477 | j = ro_size; |
| 1478 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; |
| 1479 | if (unlikely((i + j) > vpd_size)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1480 | dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n", |
| 1481 | __func__, (i + j), vpd_size); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1482 | ro_size = vpd_size - i; |
| 1483 | } |
| 1484 | |
| 1485 | /* |
| 1486 | * Find the offset of the WWPN tag within the read only |
| 1487 | * VPD data and validate the found field (partials are |
| 1488 | * no good to us). Convert the ASCII data to an integer |
| 1489 | * value. Note that we must copy to a temporary buffer |
| 1490 | * because the conversion service requires that the ASCII |
| 1491 | * string be terminated. |
| 1492 | */ |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 1493 | for (k = 0; k < cfg->num_fc_ports; k++) { |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1494 | j = ro_size; |
| 1495 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; |
| 1496 | |
| 1497 | i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); |
| 1498 | if (unlikely(i < 0)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1499 | dev_err(dev, "%s: Port %d WWPN not found in VPD\n", |
| 1500 | __func__, k); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1501 | rc = -ENODEV; |
| 1502 | goto out; |
| 1503 | } |
| 1504 | |
| 1505 | j = pci_vpd_info_field_size(&vpd_data[i]); |
| 1506 | i += PCI_VPD_INFO_FLD_HDR_SIZE; |
| 1507 | if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1508 | dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n", |
| 1509 | __func__, k); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1510 | rc = -ENODEV; |
| 1511 | goto out; |
| 1512 | } |
| 1513 | |
| 1514 | memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); |
| 1515 | rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); |
| 1516 | if (unlikely(rc)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1517 | dev_err(dev, "%s: WWPN conversion failed for port %d\n", |
| 1518 | __func__, k); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1519 | rc = -ENODEV; |
| 1520 | goto out; |
| 1521 | } |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 1522 | |
| 1523 | dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1524 | } |
| 1525 | |
| 1526 | out: |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1527 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1528 | return rc; |
| 1529 | } |
| 1530 | |
| 1531 | /** |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1532 | * init_pcr() - initialize the provisioning and control registers |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1533 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1534 | * |
| 1535 | * Also sets up fast access to the mapped registers and initializes AFU |
| 1536 | * command fields that never change. |
| 1537 | */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1538 | static void init_pcr(struct cxlflash_cfg *cfg) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1539 | { |
| 1540 | struct afu *afu = cfg->afu; |
Matthew R. Ochs | 1786f4a | 2015-10-21 15:14:48 -0500 | [diff] [blame] | 1541 | struct sisl_ctrl_map __iomem *ctrl_map; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1542 | struct hwq *hwq; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1543 | int i; |
| 1544 | |
| 1545 | for (i = 0; i < MAX_CONTEXT; i++) { |
| 1546 | ctrl_map = &afu->afu_map->ctrls[i].ctrl; |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1547 | /* Disrupt any clients that could be running */ |
| 1548 | /* e.g. clients that survived a master restart */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1549 | writeq_be(0, &ctrl_map->rht_start); |
| 1550 | writeq_be(0, &ctrl_map->rht_cnt_id); |
| 1551 | writeq_be(0, &ctrl_map->ctx_cap); |
| 1552 | } |
| 1553 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1554 | /* Copy frequently used fields into hwq */ |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 1555 | for (i = 0; i < afu->num_hwqs; i++) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1556 | hwq = get_hwq(afu, i); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1557 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1558 | hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx); |
| 1559 | hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; |
| 1560 | hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; |
| 1561 | |
| 1562 | /* Program the Endian Control for the master context */ |
| 1563 | writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl); |
| 1564 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1565 | } |
| 1566 | |
| 1567 | /** |
| 1568 | * init_global() - initialize AFU global registers |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1569 | * @cfg: Internal structure associated with the host. |
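| | * |
| | * Return: 0 on success, -errno on failure |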
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1570 | */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 1571 | static int init_global(struct cxlflash_cfg *cfg) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1572 | { |
| 1573 | struct afu *afu = cfg->afu; |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1574 | struct device *dev = &cfg->dev->dev; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1575 | struct hwq *hwq; |
| 1576 | struct sisl_host_map __iomem *hmap; |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1577 | __be64 __iomem *fc_port_regs; |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 1578 | u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1579 | int i = 0, num_ports = 0; |
| 1580 | int rc = 0; |
| 1581 | u64 reg; |
| 1582 | |
| 1583 | rc = read_vpd(cfg, &wwpn[0]); |
| 1584 | if (rc) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1585 | dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1586 | goto out; |
| 1587 | } |
| 1588 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1589 | /* Set up RRQ and SQ in HWQ for master issued cmds */ |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 1590 | for (i = 0; i < afu->num_hwqs; i++) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1591 | hwq = get_hwq(afu, i); |
| 1592 | hmap = hwq->host_map; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1593 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1594 | writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start); |
| 1595 | writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end); |
| 1596 | |
| 1597 | if (afu_is_sq_cmd_mode(afu)) { |
| 1598 | writeq_be((u64)hwq->hsq_start, &hmap->sq_start); |
| 1599 | writeq_be((u64)hwq->hsq_end, &hmap->sq_end); |
| 1600 | } |
Matthew R. Ochs | 696d0b0 | 2017-01-11 19:19:33 -0600 | [diff] [blame] | 1601 | } |
| 1602 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1603 | /* AFU configuration */ |
| 1604 | reg = readq_be(&afu->afu_map->global.regs.afu_config); |
| 1605 | reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; |
| 1606 | /* enable all auto retry options and control endianness */ |
| 1607 | /* leave others at default: */ |
| 1608 | /* CTX_CAP write protected, mbox_r does not clear on read and */ |
| 1609 | /* checker on if dual afu */ |
| 1610 | writeq_be(reg, &afu->afu_map->global.regs.afu_config); |
| 1611 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1612 | /* Global port select: select the ports the AFU will use */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1613 | if (afu->internal_lun) { |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1614 | /* Only use port 0 */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1615 | writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); |
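| | /* Skip the FC port setup below when using the internal LUN */ |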
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 1616 | num_ports = 0; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1617 | } else { |
Matthew R. Ochs | 8fa4f17 | 2017-04-12 14:14:05 -0500 | [diff] [blame] | 1618 | writeq_be(PORT_MASK(cfg->num_fc_ports), |
| 1619 | &afu->afu_map->global.regs.afu_port_sel); |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 1620 | num_ports = cfg->num_fc_ports; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1621 | } |
| 1622 | |
| 1623 | for (i = 0; i < num_ports; i++) { |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1624 | fc_port_regs = get_fc_port_regs(cfg, i); |
| 1625 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1626 | /* Unmask all errors (but they are still masked at AFU) */ |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1627 | writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]); |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1628 | /* Clear CRC error cnt & set a threshold */ |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1629 | (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]); |
| 1630 | writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1631 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1632 | /* Set WWPNs. If already programmed, wwpn[i] is 0 */ |
Matthew R. Ochs | f801326 | 2016-09-02 15:40:20 -0500 | [diff] [blame] | 1633 | if (wwpn[i] != 0) |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 1634 | afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1635 | /* Programming WWPN back to back causes additional |
| 1636 | * offline/online transitions and a PLOGI |
| 1637 | */ |
| 1638 | msleep(100); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1639 | } |
| 1640 | |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1641 | /* Set up master's own CTX_CAP to allow real mode, host translation */ |
| 1642 | /* tables, afu cmds and read/write GSCSI cmds. */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1643 | /* First, unlock ctx_cap write by reading mbox */ |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 1644 | for (i = 0; i < afu->num_hwqs; i++) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1645 | hwq = get_hwq(afu, i); |
| 1646 | |
| 1647 | (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */ |
| 1648 | writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | |
| 1649 | SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | |
| 1650 | SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), |
| 1651 | &hwq->ctrl_map->ctx_cap); |
| 1652 | } |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1653 | /* Initialize heartbeat */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1654 | afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1655 | out: |
| 1656 | return rc; |
| 1657 | } |
| 1658 | |
| 1659 | /** |
| 1660 | * start_afu() - initializes and starts the AFU |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1661 | * @cfg: Internal structure associated with the host. |
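| | * |
| | * Return: 0 on success, -errno on failure |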
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1662 | */ |
| 1663 | static int start_afu(struct cxlflash_cfg *cfg) |
| 1664 | { |
| 1665 | struct afu *afu = cfg->afu; |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1666 | struct device *dev = &cfg->dev->dev; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1667 | struct hwq *hwq; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1668 | int rc = 0; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1669 | int i; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1670 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1671 | init_pcr(cfg); |
| 1672 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1673 | /* Initialize each HWQ */ |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 1674 | for (i = 0; i < afu->num_hwqs; i++) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1675 | hwq = get_hwq(afu, i); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1676 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1677 | /* After an AFU reset, RRQ entries are stale, clear them */ |
| 1678 | memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry)); |
Matthew R. Ochs | 696d0b0 | 2017-01-11 19:19:33 -0600 | [diff] [blame] | 1679 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1680 | /* Initialize RRQ pointers */ |
| 1681 | hwq->hrrq_start = &hwq->rrq_entry[0]; |
| 1682 | hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1]; |
| 1683 | hwq->hrrq_curr = hwq->hrrq_start; |
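| | /* RRQ entries were zeroed, so the first valid pass uses toggle = 1 */ |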
| 1684 | hwq->toggle = 1; |
| 1685 | spin_lock_init(&hwq->hrrq_slock); |
| 1686 | |
| 1687 | /* Initialize SQ */ |
| 1688 | if (afu_is_sq_cmd_mode(afu)) { |
| 1689 | memset(&hwq->sq, 0, sizeof(hwq->sq)); |
| 1690 | hwq->hsq_start = &hwq->sq[0]; |
| 1691 | hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1]; |
| 1692 | hwq->hsq_curr = hwq->hsq_start; |
| 1693 | |
| 1694 | spin_lock_init(&hwq->hsq_slock); |
| 1695 | atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1); |
| 1696 | } |
| 1697 | |
| 1698 | /* Initialize IRQ poll */ |
| 1699 | if (afu_is_irqpoll_enabled(afu)) |
| 1700 | irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight, |
| 1701 | cxlflash_irqpoll); |
| 1702 | |
Matthew R. Ochs | 696d0b0 | 2017-01-11 19:19:33 -0600 | [diff] [blame] | 1703 | } |
| 1704 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1705 | rc = init_global(cfg); |
| 1706 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1707 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1708 | return rc; |
| 1709 | } |
| 1710 | |
| 1711 | /** |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1712 | * init_intr() - setup interrupt handlers for the master context |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1713 | * @cfg: Internal structure associated with the host. |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1714 | * @hwq: Hardware queue to initialize. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1715 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1716 | * Return: UNDO_NOOP on success, level of rollback required on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1717 | */ |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1718 | static enum undo_level init_intr(struct cxlflash_cfg *cfg, |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1719 | struct hwq *hwq) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1720 | { |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1721 | struct device *dev = &cfg->dev->dev; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1722 | struct cxl_context *ctx = hwq->ctx; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1723 | int rc = 0; |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1724 | enum undo_level level = UNDO_NOOP; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1725 | bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); |
| 1726 | int num_irqs = is_primary_hwq ? 3 : 2; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1727 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1728 | rc = cxl_allocate_afu_irqs(ctx, num_irqs); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1729 | if (unlikely(rc)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1730 | dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1731 | __func__, rc); |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1732 | level = UNDO_NOOP; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1733 | goto out; |
| 1734 | } |
| 1735 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1736 | rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq, |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1737 | "SISL_MSI_SYNC_ERROR"); |
| 1738 | if (unlikely(rc <= 0)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1739 | dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1740 | level = FREE_IRQ; |
| 1741 | goto out; |
| 1742 | } |
| 1743 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1744 | rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq, |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1745 | "SISL_MSI_RRQ_UPDATED"); |
| 1746 | if (unlikely(rc <= 0)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1747 | dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1748 | level = UNMAP_ONE; |
| 1749 | goto out; |
| 1750 | } |
| 1751 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1752 | /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ |
| 1753 | if (!is_primary_hwq) |
| 1754 | goto out; |
| 1755 | |
| 1756 | rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq, |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1757 | "SISL_MSI_ASYNC_ERROR"); |
| 1758 | if (unlikely(rc <= 0)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1759 | dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1760 | level = UNMAP_TWO; |
| 1761 | goto out; |
| 1762 | } |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1763 | out: |
| 1764 | return level; |
| 1765 | } |
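| | |
| | /* |
| | * For reference, a sketch of how the undo levels returned above might be |
| | * consumed by the teardown path. term_intr() lives elsewhere in this |
| | * driver; this rendition is illustrative, not the verbatim source. The |
| | * switch deliberately falls through so each level unwinds its own step |
| | * plus everything below it. |
| | */ |
| | static void example_term_intr(struct hwq *hwq, enum undo_level level) |
| | { |
| | switch (level) { |
| | case UNMAP_THREE: |
| | if (hwq->index == PRIMARY_HWQ) |
| | cxl_unmap_afu_irq(hwq->ctx, 3, hwq); |
| | /* fall through */ |
| | case UNMAP_TWO: |
| | cxl_unmap_afu_irq(hwq->ctx, 2, hwq); |
| | /* fall through */ |
| | case UNMAP_ONE: |
| | cxl_unmap_afu_irq(hwq->ctx, 1, hwq); |
| | /* fall through */ |
| | case FREE_IRQ: |
| | cxl_free_afu_irqs(hwq->ctx); |
| | /* fall through */ |
| | case UNDO_NOOP: |
| | break; |
| | } |
| | } |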
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1766 | |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1767 | /** |
| 1768 | * init_mc() - create and register as the master context |
| 1769 | * @cfg: Internal structure associated with the host. |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1770 | * @index: HWQ Index of the master context. |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1771 | * |
| 1772 | * Return: 0 on success, -errno on failure |
| 1773 | */ |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1774 | static int init_mc(struct cxlflash_cfg *cfg, u32 index) |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1775 | { |
| 1776 | struct cxl_context *ctx; |
| 1777 | struct device *dev = &cfg->dev->dev; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1778 | struct hwq *hwq = get_hwq(cfg->afu, index); |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1779 | int rc = 0; |
| 1780 | enum undo_level level; |
| 1781 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1782 | hwq->afu = cfg->afu; |
| 1783 | hwq->index = index; |
| 1784 | |
| 1785 | if (index == PRIMARY_HWQ) |
| 1786 | ctx = cxl_get_context(cfg->dev); |
| 1787 | else |
| 1788 | ctx = cxl_dev_context_init(cfg->dev); |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1789 | if (IS_ERR_OR_NULL(ctx)) { |
| 1790 | rc = -ENOMEM; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1791 | goto err1; |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1792 | } |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1793 | |
| 1794 | WARN_ON(hwq->ctx); |
| 1795 | hwq->ctx = ctx; |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1796 | |
| 1797 | /* Set it up as a master with the CXL */ |
| 1798 | cxl_set_master(ctx); |
| 1799 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1800 | /* Reset AFU when initializing primary context */ |
| 1801 | if (index == PRIMARY_HWQ) { |
| 1802 | rc = cxl_afu_reset(ctx); |
| 1803 | if (unlikely(rc)) { |
| 1804 | dev_err(dev, "%s: AFU reset failed rc=%d\n", |
| 1805 | __func__, rc); |
| 1806 | goto err1; |
| 1807 | } |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1808 | } |
| 1809 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1810 | level = init_intr(cfg, hwq); |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1811 | if (unlikely(level)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1812 | dev_err(dev, "%s: interrupt init failed level=%d\n", __func__, level); |
| | rc = -ENODEV; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1813 | goto err2; |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 1814 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1815 | |
| 1816 | /* |
| | * This performs the equivalent of the CXL_IOCTL_START_WORK. |
| 1817 | * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process |
| 1818 | * element (pe) that is embedded in the context (ctx). |
| 1819 | */ |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1820 | rc = start_context(cfg, index); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1821 | if (unlikely(rc)) { |
| 1822 | dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); |
| 1823 | level = UNMAP_THREE; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1824 | goto err2; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1825 | } |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1826 | |
| 1827 | out: |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1828 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1829 | return rc; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1830 | err2: |
| 1831 | term_intr(cfg, level, index); |
| 1832 | if (index != PRIMARY_HWQ) |
| 1833 | cxl_release_context(ctx); |
| 1834 | err1: |
| 1835 | hwq->ctx = NULL; |
| 1836 | goto out; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1837 | } |
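| | |
| | /* |
| | * Sketch of the start_context() step referenced above (assumed shape, |
| | * not the verbatim routine): the context is started through the cxl |
| | * kernel API with its work element descriptor, the in-kernel analog of |
| | * CXL_IOCTL_START_WORK. The work member on struct hwq is an assumption |
| | * here. |
| | */ |
| | static int example_start_context(struct cxlflash_cfg *cfg, u32 index) |
| | { |
| | struct hwq *hwq = get_hwq(cfg->afu, index); |
| | |
| | return cxl_start_context(hwq->ctx, hwq->work.work_element_descriptor, NULL); |
| | } |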
| 1838 | |
| 1839 | /** |
Matthew R. Ochs | 56518072 | 2017-04-12 14:14:28 -0500 | [diff] [blame] | 1840 | * get_num_afu_ports() - determines and configures the number of AFU ports |
| 1841 | * @cfg: Internal structure associated with the host. |
| 1842 | * |
| 1843 | * This routine determines the number of AFU ports by converting the global |
| 1844 | * port selection mask. The converted value is only valid following an AFU |
| 1845 | * reset (explicit or power-on). This routine must be invoked shortly after |
| 1846 | * mapping as other routines are dependent on the number of ports during the |
| 1847 | * initialization sequence. |
| 1848 | * |
| 1849 | * To support legacy AFUs that might not have reflected an initial global |
| 1850 | * port mask (value read is 0), default to the number of ports originally |
| 1851 | * supported by the cxlflash driver (2) before hardware with other port |
| 1852 | * offerings was introduced. |
| 1853 | */ |
| 1854 | static void get_num_afu_ports(struct cxlflash_cfg *cfg) |
| 1855 | { |
| 1856 | struct afu *afu = cfg->afu; |
| 1857 | struct device *dev = &cfg->dev->dev; |
| 1858 | u64 port_mask; |
| 1859 | int num_fc_ports = LEGACY_FC_PORTS; |
| 1860 | |
| 1861 | port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel); |
| 1862 | if (port_mask != 0ULL) |
| 1863 | num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS); |
| 1864 | |
| 1865 | dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n", |
| 1866 | __func__, port_mask, num_fc_ports); |
| 1867 | |
| 1868 | cfg->num_fc_ports = num_fc_ports; |
| 1869 | cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports); |
| 1870 | } |
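| | |
| | /* |
| | * Worked example of the conversion above (illustrative mask values): |
| | * |
| | * port_mask = 0x3 -> ilog2(0x3) + 1 = 2 FC ports (legacy 2-port AFU) |
| | * port_mask = 0xf -> ilog2(0xf) + 1 = 4 FC ports |
| | * port_mask = 0x0 -> LEGACY_FC_PORTS (2) is assumed |
| | * |
| | * The computed count is clamped to MAX_FC_PORTS. |
| | */ |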
| 1871 | |
| 1872 | /** |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1873 | * init_afu() - setup as master context and start AFU |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1874 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1875 | * |
| 1876 | * This routine is a higher level of control for configuring the |
| 1877 | * AFU on probe and reset paths. |
| 1878 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 1879 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1880 | */ |
| 1881 | static int init_afu(struct cxlflash_cfg *cfg) |
| 1882 | { |
| 1883 | u64 reg; |
| 1884 | int rc = 0; |
| 1885 | struct afu *afu = cfg->afu; |
| 1886 | struct device *dev = &cfg->dev->dev; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1887 | struct hwq *hwq; |
| 1888 | int i; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1889 | |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 1890 | cxl_perst_reloads_same_image(cfg->cxl_afu, true); |
| 1891 | |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 1892 | afu->num_hwqs = afu->desired_hwqs; |
| 1893 | for (i = 0; i < afu->num_hwqs; i++) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1894 | rc = init_mc(cfg, i); |
| 1895 | if (rc) { |
| 1896 | dev_err(dev, "%s: init_mc failed rc=%d index=%d\n", |
| 1897 | __func__, rc, i); |
| 1898 | goto err1; |
| 1899 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1900 | } |
| 1901 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1902 | /* Map the entire MMIO space of the AFU using the first context */ |
| 1903 | hwq = get_hwq(afu, PRIMARY_HWQ); |
| 1904 | afu->afu_map = cxl_psa_map(hwq->ctx); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1905 | if (!afu->afu_map) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1906 | dev_err(dev, "%s: cxl_psa_map failed\n", __func__); |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1907 | rc = -ENOMEM; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1908 | goto err1; |
| 1909 | } |
| 1910 | |
Matthew R. Ochs | e5ce067 | 2015-10-21 15:14:01 -0500 | [diff] [blame] | 1911 | /* Don't byte reverse on reading afu_version, else the string will be backwards */ |
| 1912 | reg = readq(&afu->afu_map->global.regs.afu_version); |
| 1913 | memcpy(afu->version, ®, sizeof(reg)); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1914 | afu->interface_version = |
| 1915 | readq_be(&afu->afu_map->global.regs.interface_version); |
Matthew R. Ochs | e5ce067 | 2015-10-21 15:14:01 -0500 | [diff] [blame] | 1916 | if ((afu->interface_version + 1) == 0) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1917 | dev_err(dev, "Back level AFU, please upgrade. AFU version %s " |
| 1918 | "interface version %016llx\n", afu->version, |
Matthew R. Ochs | e5ce067 | 2015-10-21 15:14:01 -0500 | [diff] [blame] | 1919 | afu->interface_version); |
| 1920 | rc = -EINVAL; |
Uma Krishnan | 0df5bef | 2017-01-11 19:20:03 -0600 | [diff] [blame] | 1921 | goto err1; |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1922 | } |
| 1923 | |
Matthew R. Ochs | 696d0b0 | 2017-01-11 19:19:33 -0600 | [diff] [blame] | 1924 | if (afu_is_sq_cmd_mode(afu)) { |
| 1925 | afu->send_cmd = send_cmd_sq; |
| 1926 | afu->context_reset = context_reset_sq; |
| 1927 | } else { |
| 1928 | afu->send_cmd = send_cmd_ioarrin; |
| 1929 | afu->context_reset = context_reset_ioarrin; |
| 1930 | } |
Matthew R. Ochs | 48b4be3 | 2016-11-28 18:43:09 -0600 | [diff] [blame] | 1931 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1932 | dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__, |
| 1933 | afu->version, afu->interface_version); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1934 | |
Matthew R. Ochs | 56518072 | 2017-04-12 14:14:28 -0500 | [diff] [blame] | 1935 | get_num_afu_ports(cfg); |
| 1936 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1937 | rc = start_afu(cfg); |
| 1938 | if (rc) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1939 | dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); |
Uma Krishnan | 0df5bef | 2017-01-11 19:20:03 -0600 | [diff] [blame] | 1940 | goto err1; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1941 | } |
| 1942 | |
| 1943 | afu_err_intr_init(cfg->afu); |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 1944 | for (i = 0; i < afu->num_hwqs; i++) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1945 | hwq = get_hwq(afu, i); |
| 1946 | |
| 1947 | spin_lock_init(&hwq->rrin_slock); |
| 1948 | hwq->room = readq_be(&hwq->host_map->cmd_room); |
| 1949 | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1950 | |
Matthew R. Ochs | 2cb7926 | 2015-08-13 21:47:53 -0500 | [diff] [blame] | 1951 | /* Restore the LUN mappings */ |
| 1952 | cxlflash_restore_luntable(cfg); |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1953 | out: |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1954 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1955 | return rc; |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1956 | |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1957 | err1: |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 1958 | for (i = afu->num_hwqs - 1; i >= 0; i--) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1959 | term_intr(cfg, UNMAP_THREE, i); |
| 1960 | term_mc(cfg, i); |
| 1961 | } |
Matthew R. Ochs | ee3491b | 2015-10-21 15:16:00 -0500 | [diff] [blame] | 1962 | goto out; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1963 | } |
| 1964 | |
| 1965 | /** |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1966 | * cxlflash_afu_sync() - builds and sends an AFU sync command |
| 1967 | * @afu: AFU associated with the host. |
| 1968 | * @ctx_hndl_u: Identifies context requesting sync. |
| 1969 | * @res_hndl_u: Identifies resource requesting sync. |
| 1970 | * @mode: Type of sync to issue (lightweight, heavyweight, global). |
| 1971 | * |
| 1972 | * The AFU can only take one sync command at a time. This routine enforces this |
Matthew R. Ochs | f15fbf8 | 2015-10-21 15:15:06 -0500 | [diff] [blame] | 1973 | * limitation by using a mutex to provide exclusive access to the AFU during |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1974 | * the sync. This design point requires calling threads not to be in interrupt |
| 1975 | * context due to the possibility of sleeping during concurrent sync operations. |
| 1976 | * |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 1977 | * AFU sync operations are only necessary and allowed when the device is |
| 1978 | * operating normally. When not operating normally, sync requests can occur as |
| 1979 | * part of cleaning up resources associated with an adapter prior to removal. |
| 1980 | * In this scenario, these requests are simply ignored (safe due to the AFU |
| 1981 | * going away). |
| 1982 | * |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1983 | * Return: |
| 1984 | * 0 on success |
| 1985 | * -1 on failure |
| 1986 | */ |
| 1987 | int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, |
| 1988 | res_hndl_t res_hndl_u, u8 mode) |
| 1989 | { |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 1990 | struct cxlflash_cfg *cfg = afu->parent; |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 1991 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1992 | struct afu_cmd *cmd = NULL; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 1993 | struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 1994 | char *buf = NULL; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1995 | int rc = 0; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 1996 | static DEFINE_MUTEX(sync_active); |
| 1997 | |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 1998 | if (cfg->state != STATE_NORMAL) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 1999 | dev_dbg(dev, "%s: Sync not required state=%u\n", |
| 2000 | __func__, cfg->state); |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2001 | return 0; |
| 2002 | } |
| 2003 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2004 | mutex_lock(&sync_active); |
Matthew R. Ochs | de01283 | 2016-11-28 18:42:33 -0600 | [diff] [blame] | 2005 | atomic_inc(&afu->cmds_active); |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 2006 | buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); |
| 2007 | if (unlikely(!buf)) { |
| 2008 | dev_err(dev, "%s: no memory for command\n", __func__); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2009 | rc = -1; |
| 2010 | goto out; |
| 2011 | } |
| 2012 | |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 2013 | cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); |
| 2014 | init_completion(&cmd->cevent); |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 2015 | cmd->parent = afu; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 2016 | cmd->hwq_index = hwq->index; |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 2017 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2018 | dev_dbg(dev, "%s: afu=%p cmd=%p ctx=%d\n", __func__, afu, cmd, ctx_hndl_u); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2019 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2020 | cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 2021 | cmd->rcb.ctx_id = hwq->ctx_hndl; |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 2022 | cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2023 | cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; |
| 2024 | |
| 2025 | cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ |
| 2026 | cmd->rcb.cdb[1] = mode; |
| 2027 | |
| 2028 | /* The cdb is aligned, no unaligned accessors required */ |
Matthew R. Ochs | 1786f4a | 2015-10-21 15:14:48 -0500 | [diff] [blame] | 2029 | *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u); |
| 2030 | *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2031 | |
Matthew R. Ochs | 48b4be3 | 2016-11-28 18:43:09 -0600 | [diff] [blame] | 2032 | rc = afu->send_cmd(afu, cmd); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2033 | if (unlikely(rc)) |
| 2034 | goto out; |
| 2035 | |
Matthew R. Ochs | 9ba848a | 2016-11-28 18:42:42 -0600 | [diff] [blame] | 2036 | rc = wait_resp(afu, cmd); |
| 2037 | if (unlikely(rc)) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2038 | rc = -1; |
| 2039 | out: |
Matthew R. Ochs | de01283 | 2016-11-28 18:42:33 -0600 | [diff] [blame] | 2040 | atomic_dec(&afu->cmds_active); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2041 | mutex_unlock(&sync_active); |
Matthew R. Ochs | 350bb47 | 2016-11-28 18:42:11 -0600 | [diff] [blame] | 2042 | kfree(buf); |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2043 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2044 | return rc; |
| 2045 | } |
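| | |
| | /* |
| | * Illustrative caller (a sketch, not taken from this file). The mode |
| | * constants AFU_LW_SYNC, AFU_HW_SYNC and AFU_GSYNC are assumed to come |
| | * from sislite.h. Must run in a context that can sleep. |
| | */ |
| | static int example_sync_resource(struct afu *afu, ctx_hndl_t ctxid, |
| | res_hndl_t rhndl) |
| | { |
| | /* Lightweight sync scoped to a single resource handle */ |
| | return cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); |
| | } |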
| 2046 | |
| 2047 | /** |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2048 | * afu_reset() - resets the AFU |
| 2049 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2050 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 2051 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2052 | */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2053 | static int afu_reset(struct cxlflash_cfg *cfg) |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2054 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2055 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2056 | int rc = 0; |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2057 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2058 | /* |
| | * Stop the context before the reset. Since the context is |
| 2059 | * no longer available, restart it after the reset is complete. |
| 2060 | */ |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2061 | term_afu(cfg); |
| 2062 | |
| 2063 | rc = init_afu(cfg); |
| 2064 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2065 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2066 | return rc; |
| 2067 | } |
| 2068 | |
| 2069 | /** |
Manoj N. Kumar | f411396 | 2016-06-15 18:49:20 -0500 | [diff] [blame] | 2070 | * drain_ioctls() - wait until all currently executing ioctls have completed |
| 2071 | * @cfg: Internal structure associated with the host. |
| 2072 | * |
| 2073 | * Obtain write access to the read/write semaphore that wraps ioctl |
| 2074 | * handling in order to 'drain' ioctls currently executing. |
| 2075 | */ |
| 2076 | static void drain_ioctls(struct cxlflash_cfg *cfg) |
| 2077 | { |
| 2078 | down_write(&cfg->ioctl_rwsem); |
| 2079 | up_write(&cfg->ioctl_rwsem); |
| 2080 | } |
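| | |
| | /* |
| | * Sketch of the reader side this drain pairs with (assumed shape; the |
| | * actual ioctl entry point lives elsewhere in this driver). Each ioctl |
| | * holds the semaphore for read, so the writer above blocks until every |
| | * in-flight ioctl has released it and then releases immediately itself. |
| | */ |
| | static int example_ioctl_path(struct cxlflash_cfg *cfg) |
| | { |
| | int rc = 0; |
| | |
| | down_read(&cfg->ioctl_rwsem); /* many readers may hold this */ |
| | /* ... service the ioctl ... */ |
| | up_read(&cfg->ioctl_rwsem); /* unblocks drain_ioctls() */ |
| | |
| | return rc; |
| | } |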
| 2081 | |
| 2082 | /** |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2083 | * cxlflash_eh_device_reset_handler() - reset a single LUN |
| 2084 | * @scp: SCSI command from stack identifying the LUN to reset. |
| 2085 | * |
| 2086 | * Return: |
| 2087 | * SUCCESS as defined in scsi/scsi.h |
| 2088 | * FAILED as defined in scsi/scsi.h |
| 2089 | */ |
| 2090 | static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) |
| 2091 | { |
| 2092 | int rc = SUCCESS; |
| 2093 | struct Scsi_Host *host = scp->device->host; |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2094 | struct cxlflash_cfg *cfg = shost_priv(host); |
| 2095 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2096 | struct afu *afu = cfg->afu; |
| 2097 | int rcr = 0; |
| 2098 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2099 | dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " |
| 2100 | "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, |
| 2101 | scp->device->channel, scp->device->id, scp->device->lun, |
| 2102 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), |
| 2103 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), |
| 2104 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), |
| 2105 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2106 | |
Matthew R. Ochs | ed486da | 2015-10-21 15:14:24 -0500 | [diff] [blame] | 2107 | retry: |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2108 | switch (cfg->state) { |
| 2109 | case STATE_NORMAL: |
| 2110 | rcr = send_tmf(afu, scp, TMF_LUN_RESET); |
| 2111 | if (unlikely(rcr)) |
| 2112 | rc = FAILED; |
| 2113 | break; |
| 2114 | case STATE_RESET: |
| 2115 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
Matthew R. Ochs | ed486da | 2015-10-21 15:14:24 -0500 | [diff] [blame] | 2116 | goto retry; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2117 | default: |
| 2118 | rc = FAILED; |
| 2119 | break; |
| 2120 | } |
| 2121 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2122 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2123 | return rc; |
| 2124 | } |
| 2125 | |
| 2126 | /** |
| 2127 | * cxlflash_eh_host_reset_handler() - reset the host adapter |
| 2128 | * @scp: SCSI command from stack identifying host. |
| 2129 | * |
Matthew R. Ochs | 1d3324c | 2016-09-02 15:39:30 -0500 | [diff] [blame] | 2130 | * Following a reset, the state is evaluated again in case an EEH occurred |
| 2131 | * during the reset. In such a scenario, the host reset will either yield |
| 2132 | * until the EEH recovery is complete or return success or failure based |
| 2133 | * upon the current device state. |
| 2134 | * |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2135 | * Return: |
| 2136 | * SUCCESS as defined in scsi/scsi.h |
| 2137 | * FAILED as defined in scsi/scsi.h |
| 2138 | */ |
| 2139 | static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) |
| 2140 | { |
| 2141 | int rc = SUCCESS; |
| 2142 | int rcr = 0; |
| 2143 | struct Scsi_Host *host = scp->device->host; |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2144 | struct cxlflash_cfg *cfg = shost_priv(host); |
| 2145 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2146 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2147 | dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " |
| 2148 | "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, |
| 2149 | scp->device->channel, scp->device->id, scp->device->lun, |
| 2150 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), |
| 2151 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), |
| 2152 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), |
| 2153 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2154 | |
| 2155 | switch (cfg->state) { |
| 2156 | case STATE_NORMAL: |
| 2157 | cfg->state = STATE_RESET; |
Manoj N. Kumar | f411396 | 2016-06-15 18:49:20 -0500 | [diff] [blame] | 2158 | drain_ioctls(cfg); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2159 | cxlflash_mark_contexts_error(cfg); |
| 2160 | rcr = afu_reset(cfg); |
| 2161 | if (rcr) { |
| 2162 | rc = FAILED; |
| 2163 | cfg->state = STATE_FAILTERM; |
| 2164 | } else |
| 2165 | cfg->state = STATE_NORMAL; |
| 2166 | wake_up_all(&cfg->reset_waitq); |
Matthew R. Ochs | 1d3324c | 2016-09-02 15:39:30 -0500 | [diff] [blame] | 2167 | ssleep(1); |
| 2168 | /* fall through */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2169 | case STATE_RESET: |
| 2170 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
| 2171 | if (cfg->state == STATE_NORMAL) |
| 2172 | break; |
| 2173 | /* fall through */ |
| 2174 | default: |
| 2175 | rc = FAILED; |
| 2176 | break; |
| 2177 | } |
| 2178 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2179 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2180 | return rc; |
| 2181 | } |
| 2182 | |
| 2183 | /** |
| 2184 | * cxlflash_change_queue_depth() - change the queue depth for the device |
| 2185 | * @sdev: SCSI device destined for queue depth change. |
| 2186 | * @qdepth: Requested queue depth value to set. |
| 2187 | * |
| 2188 | * The requested queue depth is capped to the maximum supported value. |
| 2189 | * |
| 2190 | * Return: The actual queue depth set. |
| 2191 | */ |
| 2192 | static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) |
| 2193 | { |
| 2195 | if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) |
| 2196 | qdepth = CXLFLASH_MAX_CMDS_PER_LUN; |
| 2197 | |
| 2198 | scsi_change_queue_depth(sdev, qdepth); |
| 2199 | return sdev->queue_depth; |
| 2200 | } |
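| | |
| | /* |
| | * Usage sketch (hypothetical device address): the SCSI midlayer invokes |
| | * the handler above when the queue depth is changed through sysfs, e.g.: |
| | * |
| | * echo 32 > /sys/bus/scsi/devices/0:0:0:0/queue_depth |
| | * |
| | * Values above CXLFLASH_MAX_CMDS_PER_LUN are silently capped. |
| | */ |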
| 2201 | |
| 2202 | /** |
| 2203 | * cxlflash_show_port_status() - queries and presents the current port status |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2204 | * @port: Desired port for status reporting. |
Matthew R. Ochs | 3b225cd | 2017-04-12 14:13:34 -0500 | [diff] [blame] | 2205 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2206 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2207 | * |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2208 | * Return: The size of the ASCII string returned in @buf or -EINVAL. |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2209 | */ |
Matthew R. Ochs | 3b225cd | 2017-04-12 14:13:34 -0500 | [diff] [blame] | 2210 | static ssize_t cxlflash_show_port_status(u32 port, |
| 2211 | struct cxlflash_cfg *cfg, |
| 2212 | char *buf) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2213 | { |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2214 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2215 | char *disp_status; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2216 | u64 status; |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 2217 | __be64 __iomem *fc_port_regs; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2218 | |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2219 | WARN_ON(port >= MAX_FC_PORTS); |
| 2220 | |
| 2221 | if (port >= cfg->num_fc_ports) { |
| 2222 | dev_info(dev, "%s: Port %d not supported on this card.\n", |
| 2223 | __func__, port); |
| 2224 | return -EINVAL; |
| 2225 | } |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2226 | |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 2227 | fc_port_regs = get_fc_port_regs(cfg, port); |
| 2228 | status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2229 | status &= FC_MTIP_STATUS_MASK; |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2230 | |
| 2231 | if (status == FC_MTIP_STATUS_ONLINE) |
| 2232 | disp_status = "online"; |
| 2233 | else if (status == FC_MTIP_STATUS_OFFLINE) |
| 2234 | disp_status = "offline"; |
| 2235 | else |
| 2236 | disp_status = "unknown"; |
| 2237 | |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2238 | return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2239 | } |
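| | |
| | /* |
| | * Example interaction (hypothetical host number): |
| | * |
| | * $ cat /sys/class/scsi_host/host0/port0 |
| | * online |
| | * |
| | * A port index at or beyond cfg->num_fc_ports yields -EINVAL instead. |
| | */ |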
| 2240 | |
| 2241 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2242 | * port0_show() - queries and presents the current status of port 0 |
| 2243 | * @dev: Generic device associated with the host owning the port. |
| 2244 | * @attr: Device attribute representing the port. |
| 2245 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2246 | * |
| 2247 | * Return: The size of the ASCII string returned in @buf. |
| 2248 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2249 | static ssize_t port0_show(struct device *dev, |
| 2250 | struct device_attribute *attr, |
| 2251 | char *buf) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2252 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2253 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2254 | |
Matthew R. Ochs | 3b225cd | 2017-04-12 14:13:34 -0500 | [diff] [blame] | 2255 | return cxlflash_show_port_status(0, cfg, buf); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2256 | } |
| 2257 | |
| 2258 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2259 | * port1_show() - queries and presents the current status of port 1 |
| 2260 | * @dev: Generic device associated with the host owning the port. |
| 2261 | * @attr: Device attribute representing the port. |
| 2262 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2263 | * |
| 2264 | * Return: The size of the ASCII string returned in @buf. |
| 2265 | */ |
| 2266 | static ssize_t port1_show(struct device *dev, |
| 2267 | struct device_attribute *attr, |
| 2268 | char *buf) |
| 2269 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2270 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2271 | |
Matthew R. Ochs | 3b225cd | 2017-04-12 14:13:34 -0500 | [diff] [blame] | 2272 | return cxlflash_show_port_status(1, cfg, buf); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2273 | } |
| 2274 | |
| 2275 | /** |
Matthew R. Ochs | 1cd7fab | 2017-04-12 14:14:41 -0500 | [diff] [blame] | 2276 | * port2_show() - queries and presents the current status of port 2 |
| 2277 | * @dev: Generic device associated with the host owning the port. |
| 2278 | * @attr: Device attribute representing the port. |
| 2279 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2280 | * |
| 2281 | * Return: The size of the ASCII string returned in @buf. |
| 2282 | */ |
| 2283 | static ssize_t port2_show(struct device *dev, |
| 2284 | struct device_attribute *attr, |
| 2285 | char *buf) |
| 2286 | { |
| 2287 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
| 2288 | |
| 2289 | return cxlflash_show_port_status(2, cfg, buf); |
| 2290 | } |
| 2291 | |
| 2292 | /** |
| 2293 | * port3_show() - queries and presents the current status of port 3 |
| 2294 | * @dev: Generic device associated with the host owning the port. |
| 2295 | * @attr: Device attribute representing the port. |
| 2296 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2297 | * |
| 2298 | * Return: The size of the ASCII string returned in @buf. |
| 2299 | */ |
| 2300 | static ssize_t port3_show(struct device *dev, |
| 2301 | struct device_attribute *attr, |
| 2302 | char *buf) |
| 2303 | { |
| 2304 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
| 2305 | |
| 2306 | return cxlflash_show_port_status(3, cfg, buf); |
| 2307 | } |
| 2308 | |
| 2309 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2310 | * lun_mode_show() - presents the current LUN mode of the host |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2311 | * @dev: Generic device associated with the host. |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2312 | * @attr: Device attribute representing the LUN mode. |
| 2313 | * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. |
| 2314 | * |
| 2315 | * Return: The size of the ASCII string returned in @buf. |
| 2316 | */ |
| 2317 | static ssize_t lun_mode_show(struct device *dev, |
| 2318 | struct device_attribute *attr, char *buf) |
| 2319 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2320 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2321 | struct afu *afu = cfg->afu; |
| 2322 | |
| 2323 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); |
| 2324 | } |
| 2325 | |
| 2326 | /** |
| 2327 | * lun_mode_store() - sets the LUN mode of the host |
| 2328 | * @dev: Generic device associated with the host. |
| 2329 | * @attr: Device attribute representing the LUN mode. |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2330 | * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII. |
| 2331 | * @count: Length of data residing in @buf. |
| 2332 | * |
| 2333 | * The CXL Flash AFU supports a dummy LUN mode where the external |
| 2334 | * links and storage are not required. Space on the FPGA is used |
| 2335 | * to create 1 or 2 small LUNs which are presented to the system |
| 2336 | * as if they were a normal storage device. This feature is useful |
| 2337 | * during development and also provides manufacturing with a way |
| 2338 | * to test the AFU without an actual device. |
| 2339 | * |
| 2340 | * 0 = external LUN[s] (default) |
| 2341 | * 1 = internal LUN (1 x 64K, 512B blocks, id 0) |
| 2342 | * 2 = internal LUN (1 x 64K, 4K blocks, id 0) |
| 2343 | * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1) |
| 2344 | * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1) |
| 2345 | * |
| 2346 | * Return: The number of bytes consumed from @buf (i.e. @count). |
| 2347 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2348 | static ssize_t lun_mode_store(struct device *dev, |
| 2349 | struct device_attribute *attr, |
| 2350 | const char *buf, size_t count) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2351 | { |
| 2352 | struct Scsi_Host *shost = class_to_shost(dev); |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2353 | struct cxlflash_cfg *cfg = shost_priv(shost); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2354 | struct afu *afu = cfg->afu; |
| 2355 | int rc; |
| 2356 | u32 lun_mode; |
| 2357 | |
| 2358 | rc = kstrtouint(buf, 10, &lun_mode); |
| 2359 | if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) { |
| 2360 | afu->internal_lun = lun_mode; |
Manoj N. Kumar | 603ecce | 2016-03-04 15:55:19 -0600 | [diff] [blame] | 2361 | |
| 2362 | /* |
| 2363 | * When configured for internal LUN, there is only one channel, |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2364 | * channel number 0; otherwise the highest channel number is one less |
| 2365 | * than the number of FC ports for this card. |
Manoj N. Kumar | 603ecce | 2016-03-04 15:55:19 -0600 | [diff] [blame] | 2366 | */ |
| 2367 | if (afu->internal_lun) |
| 2368 | shost->max_channel = 0; |
| 2369 | else |
Matthew R. Ochs | 8fa4f17 | 2017-04-12 14:14:05 -0500 | [diff] [blame] | 2370 | shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports); |
Manoj N. Kumar | 603ecce | 2016-03-04 15:55:19 -0600 | [diff] [blame] | 2371 | |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2372 | afu_reset(cfg); |
| 2373 | scsi_scan_host(cfg->host); |
| 2374 | } |
| 2375 | |
| 2376 | return count; |
| 2377 | } |
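| | |
| | /* |
| | * Usage sketch (hypothetical host number) for the attribute above: |
| | * |
| | * echo 1 > /sys/class/scsi_host/host0/lun_mode # internal LUN, 512B blocks |
| | * echo 0 > /sys/class/scsi_host/host0/lun_mode # back to external LUN(s) |
| | * |
| | * Each change triggers afu_reset() followed by a host rescan. |
| | */ |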
| 2378 | |
| 2379 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2380 | * ioctl_version_show() - presents the current ioctl version of the host |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2381 | * @dev: Generic device associated with the host. |
| 2382 | * @attr: Device attribute representing the ioctl version. |
| 2383 | * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. |
| 2384 | * |
| 2385 | * Return: The size of the ASCII string returned in @buf. |
| 2386 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2387 | static ssize_t ioctl_version_show(struct device *dev, |
| 2388 | struct device_attribute *attr, char *buf) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2389 | { |
| 2390 | return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0); |
| 2391 | } |
| 2392 | |
| 2393 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2394 | * cxlflash_show_port_lun_table() - queries and presents the port LUN table |
| 2395 | * @port: Desired port for status reporting. |
Matthew R. Ochs | 3b225cd | 2017-04-12 14:13:34 -0500 | [diff] [blame] | 2396 | * @cfg: Internal structure associated with the host. |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2397 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2398 | * |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2399 | * Return: The size of the ASCII string returned in @buf or -EINVAL. |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2400 | */ |
| 2401 | static ssize_t cxlflash_show_port_lun_table(u32 port, |
Matthew R. Ochs | 3b225cd | 2017-04-12 14:13:34 -0500 | [diff] [blame] | 2402 | struct cxlflash_cfg *cfg, |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2403 | char *buf) |
| 2404 | { |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2405 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 2406 | __be64 __iomem *fc_port_luns; |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2407 | int i; |
| 2408 | ssize_t bytes = 0; |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2409 | |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2410 | WARN_ON(port >= MAX_FC_PORTS); |
| 2411 | |
| 2412 | if (port >= cfg->num_fc_ports) { |
| 2413 | dev_info(dev, "%s: Port %d not supported on this card.\n", |
| 2414 | __func__, port); |
| 2415 | return -EINVAL; |
| 2416 | } |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2417 | |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 2418 | fc_port_luns = get_fc_port_luns(cfg, port); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2419 | |
| 2420 | for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) |
| 2421 | bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 2422 | "%03d: %016llx\n", |
| 2423 | i, readq_be(&fc_port_luns[i])); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2424 | return bytes; |
| 2425 | } |
| 2426 | |
| 2427 | /** |
| 2428 | * port0_lun_table_show() - presents the current LUN table of port 0 |
| 2429 | * @dev: Generic device associated with the host owning the port. |
| 2430 | * @attr: Device attribute representing the port. |
| 2431 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2432 | * |
| 2433 | * Return: The size of the ASCII string returned in @buf. |
| 2434 | */ |
| 2435 | static ssize_t port0_lun_table_show(struct device *dev, |
| 2436 | struct device_attribute *attr, |
| 2437 | char *buf) |
| 2438 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2439 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2440 | |
Matthew R. Ochs | 3b225cd | 2017-04-12 14:13:34 -0500 | [diff] [blame] | 2441 | return cxlflash_show_port_lun_table(0, cfg, buf); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2442 | } |
| 2443 | |
| 2444 | /** |
| 2445 | * port1_lun_table_show() - presents the current LUN table of port 1 |
| 2446 | * @dev: Generic device associated with the host owning the port. |
| 2447 | * @attr: Device attribute representing the port. |
| 2448 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2449 | * |
| 2450 | * Return: The size of the ASCII string returned in @buf. |
| 2451 | */ |
| 2452 | static ssize_t port1_lun_table_show(struct device *dev, |
| 2453 | struct device_attribute *attr, |
| 2454 | char *buf) |
| 2455 | { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2456 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2457 | |
Matthew R. Ochs | 3b225cd | 2017-04-12 14:13:34 -0500 | [diff] [blame] | 2458 | return cxlflash_show_port_lun_table(1, cfg, buf); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2459 | } |
| 2460 | |
| 2461 | /** |
Matthew R. Ochs | 1cd7fab | 2017-04-12 14:14:41 -0500 | [diff] [blame] | 2462 | * port2_lun_table_show() - presents the current LUN table of port 2 |
| 2463 | * @dev: Generic device associated with the host owning the port. |
| 2464 | * @attr: Device attribute representing the port. |
| 2465 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2466 | * |
| 2467 | * Return: The size of the ASCII string returned in @buf. |
| 2468 | */ |
| 2469 | static ssize_t port2_lun_table_show(struct device *dev, |
| 2470 | struct device_attribute *attr, |
| 2471 | char *buf) |
| 2472 | { |
| 2473 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
| 2474 | |
| 2475 | return cxlflash_show_port_lun_table(2, cfg, buf); |
| 2476 | } |
| 2477 | |
| 2478 | /** |
| 2479 | * port3_lun_table_show() - presents the current LUN table of port 3 |
| 2480 | * @dev: Generic device associated with the host owning the port. |
| 2481 | * @attr: Device attribute representing the port. |
| 2482 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
| 2483 | * |
| 2484 | * Return: The size of the ASCII string returned in @buf. |
| 2485 | */ |
| 2486 | static ssize_t port3_lun_table_show(struct device *dev, |
| 2487 | struct device_attribute *attr, |
| 2488 | char *buf) |
| 2489 | { |
| 2490 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
| 2491 | |
| 2492 | return cxlflash_show_port_lun_table(3, cfg, buf); |
| 2493 | } |
| 2494 | |
| 2495 | /** |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 2496 | * irqpoll_weight_show() - presents the current IRQ poll weight for the host |
| 2497 | * @dev: Generic device associated with the host. |
| 2498 | * @attr: Device attribute representing the IRQ poll weight. |
| 2499 | * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll |
| 2500 | * weight in ASCII. |
| 2501 | * |
| 2502 | * An IRQ poll weight of 0 indicates polling is disabled. |
| 2503 | * |
| 2504 | * Return: The size of the ASCII string returned in @buf. |
| 2505 | */ |
| 2506 | static ssize_t irqpoll_weight_show(struct device *dev, |
| 2507 | struct device_attribute *attr, char *buf) |
| 2508 | { |
| 2509 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
| 2510 | struct afu *afu = cfg->afu; |
| 2511 | |
| 2512 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight); |
| 2513 | } |
| 2514 | |
| 2515 | /** |
| 2516 | * irqpoll_weight_store() - sets the current IRQ poll weight for the host |
| 2517 | * @dev: Generic device associated with the host. |
| 2518 | * @attr: Device attribute representing the IRQ poll weight. |
| 2519 | * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll |
| 2520 | * weight in ASCII. |
| 2521 | * @count: Length of data residing in @buf. |
| 2522 | * |
| 2523 | * An IRQ poll weight of 0 indicates polling is disabled. |
| 2524 | * |
| 2525 | * Return: @count on success, -EINVAL on an invalid or unchanged weight. |
| 2526 | */ |
| 2527 | static ssize_t irqpoll_weight_store(struct device *dev, |
| 2528 | struct device_attribute *attr, |
| 2529 | const char *buf, size_t count) |
| 2530 | { |
| 2531 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
| 2532 | struct device *cfgdev = &cfg->dev->dev; |
| 2533 | struct afu *afu = cfg->afu; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 2534 | struct hwq *hwq; |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 2535 | u32 weight; |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 2536 | int rc, i; |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 2537 | |
| 2538 | rc = kstrtouint(buf, 10, &weight); |
| 2539 | if (rc) |
| 2540 | return -EINVAL; |
| 2541 | |
| 2542 | if (weight > 256) { |
| 2543 | dev_info(cfgdev, |
| 2544 | "Invalid IRQ poll weight. It must be 256 or less.\n"); |
| 2545 | return -EINVAL; |
| 2546 | } |
| 2547 | |
| 2548 | if (weight == afu->irqpoll_weight) { |
| 2549 | dev_info(cfgdev, |
| 2550 | "Specified IRQ poll weight is already the current weight.\n"); |
| 2551 | return -EINVAL; |
| 2552 | } |
| 2553 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 2554 | if (afu_is_irqpoll_enabled(afu)) { |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 2555 | for (i = 0; i < afu->num_hwqs; i++) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 2556 | hwq = get_hwq(afu, i); |
| 2557 | |
| 2558 | irq_poll_disable(&hwq->irqpoll); |
| 2559 | } |
| 2560 | } |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 2561 | |
| 2562 | afu->irqpoll_weight = weight; |
| 2563 | |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 2564 | if (weight > 0) { |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 2565 | for (i = 0; i < afu->num_hwqs; i++) { |
Uma Krishnan | bfc0bab | 2017-04-12 14:15:42 -0500 | [diff] [blame] | 2566 | hwq = get_hwq(afu, i); |
| 2567 | |
| 2568 | irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll); |
| 2569 | } |
| 2570 | } |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 2571 | |
| 2572 | return count; |
| 2573 | } |
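| | |
| | /* |
| | * Usage sketch (hypothetical host number): |
| | * |
| | * echo 64 > /sys/class/scsi_host/host0/irqpoll_weight # poll budget of 64 |
| | * echo 0 > /sys/class/scsi_host/host0/irqpoll_weight # disable polling |
| | * |
| | * A weight change tears down and re-initializes the irq_poll instance on |
| | * every hardware queue. |
| | */ |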
| 2574 | |
| 2575 | /** |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 2576 | * num_hwqs_show() - presents the number of hardware queues for the host |
| 2577 | * @dev: Generic device associated with the host. |
| 2578 | * @attr: Device attribute representing the number of hardware queues. |
| 2579 | * @buf: Buffer of length PAGE_SIZE to report back the number of hardware |
| 2580 | * queues in ASCII. |
| 2581 | * |
| 2582 | * Return: The size of the ASCII string returned in @buf. |
| 2583 | */ |
| 2584 | static ssize_t num_hwqs_show(struct device *dev, |
| 2585 | struct device_attribute *attr, char *buf) |
| 2586 | { |
| 2587 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
| 2588 | struct afu *afu = cfg->afu; |
| 2589 | |
| 2590 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs); |
| 2591 | } |
| 2592 | |
| 2593 | /** |
| 2594 | * num_hwqs_store() - sets the number of hardware queues for the host |
| 2595 | * @dev: Generic device associated with the host. |
| 2596 | * @attr: Device attribute representing the number of hardware queues. |
| 2597 | * @buf: Buffer of length PAGE_SIZE containing the number of hardware |
| 2598 | * queues in ASCII. |
| 2599 | * @count: Length of data residing in @buf. |
| 2600 | * |
| 2601 | * n > 0: num_hwqs = n |
| 2602 | * n = 0: num_hwqs = num_online_cpus() |
| 2603 | * n < 0: num_hwqs = num_online_cpus() / abs(n) |
| 2604 | * |
| 2605 | * Return: The number of bytes consumed from @buf (i.e. @count). |
| 2606 | */ |
| 2607 | static ssize_t num_hwqs_store(struct device *dev, |
| 2608 | struct device_attribute *attr, |
| 2609 | const char *buf, size_t count) |
| 2610 | { |
| 2611 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
| 2612 | struct afu *afu = cfg->afu; |
| 2613 | int rc; |
| 2614 | int nhwqs, num_hwqs; |
| 2615 | |
| 2616 | rc = kstrtoint(buf, 10, &nhwqs); |
| 2617 | if (rc) |
| 2618 | return -EINVAL; |
| 2619 | |
| 2620 | if (nhwqs >= 1) |
| 2621 | num_hwqs = nhwqs; |
| 2622 | else if (nhwqs == 0) |
| 2623 | num_hwqs = num_online_cpus(); |
| 2624 | else |
| 2625 | num_hwqs = num_online_cpus() / abs(nhwqs); |
| 2626 | |
| 2627 | afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS); |
| 2628 | WARN_ON_ONCE(afu->desired_hwqs == 0); |
| 2629 | |
| 2630 | retry: |
| 2631 | switch (cfg->state) { |
| 2632 | case STATE_NORMAL: |
| 2633 | cfg->state = STATE_RESET; |
| 2634 | drain_ioctls(cfg); |
| 2635 | cxlflash_mark_contexts_error(cfg); |
| 2636 | rc = afu_reset(cfg); |
| 2637 | if (rc) |
| 2638 | cfg->state = STATE_FAILTERM; |
| 2639 | else |
| 2640 | cfg->state = STATE_NORMAL; |
| 2641 | wake_up_all(&cfg->reset_waitq); |
| 2642 | break; |
| 2643 | case STATE_RESET: |
| 2644 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
| 2645 | if (cfg->state == STATE_NORMAL) |
| 2646 | goto retry; |
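| | /* fall through */ |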
| 2647 | default: |
| 2648 | /* Ideally should not happen */ |
| 2649 | dev_err(dev, "%s: Device is not ready, state=%d\n", |
| 2650 | __func__, cfg->state); |
| 2651 | break; |
| 2652 | } |
| 2653 | |
| 2654 | return count; |
| 2655 | } |
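| | |
| | /* |
| | * Worked example of the mapping above on a hypothetical 8-CPU system: |
| | * |
| | * echo 3 -> num_hwqs = 3 |
| | * echo 0 -> num_hwqs = 8 (num_online_cpus()) |
| | * echo -2 -> num_hwqs = 4 (num_online_cpus() / 2) |
| | * |
| | * The result is clamped to CXLFLASH_MAX_HWQS and applied via afu_reset(). |
| | */ |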
| 2656 | |
| 2657 | /** |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2658 | * mode_show() - presents the current mode of the device |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2659 | * @dev: Generic device associated with the device. |
| 2660 | * @attr: Device attribute representing the device mode. |
| 2661 | * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII. |
| 2662 | * |
| 2663 | * Return: The size of the ASCII string returned in @buf. |
| 2664 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2665 | static ssize_t mode_show(struct device *dev, |
| 2666 | struct device_attribute *attr, char *buf) |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2667 | { |
| 2668 | struct scsi_device *sdev = to_scsi_device(dev); |
| 2669 | |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2670 | return scnprintf(buf, PAGE_SIZE, "%s\n", |
| 2671 | sdev->hostdata ? "superpipe" : "legacy"); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2672 | } |
| 2673 | |
| 2674 | /* |
| 2675 | * Host attributes |
| 2676 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2677 | static DEVICE_ATTR_RO(port0); |
| 2678 | static DEVICE_ATTR_RO(port1); |
Matthew R. Ochs | 1cd7fab | 2017-04-12 14:14:41 -0500 | [diff] [blame] | 2679 | static DEVICE_ATTR_RO(port2); |
| 2680 | static DEVICE_ATTR_RO(port3); |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2681 | static DEVICE_ATTR_RW(lun_mode); |
| 2682 | static DEVICE_ATTR_RO(ioctl_version); |
| 2683 | static DEVICE_ATTR_RO(port0_lun_table); |
| 2684 | static DEVICE_ATTR_RO(port1_lun_table); |
Matthew R. Ochs | 1cd7fab | 2017-04-12 14:14:41 -0500 | [diff] [blame] | 2685 | static DEVICE_ATTR_RO(port2_lun_table); |
| 2686 | static DEVICE_ATTR_RO(port3_lun_table); |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 2687 | static DEVICE_ATTR_RW(irqpoll_weight); |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 2688 | static DEVICE_ATTR_RW(num_hwqs); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2689 | |
| 2690 | static struct device_attribute *cxlflash_host_attrs[] = { |
| 2691 | &dev_attr_port0, |
| 2692 | &dev_attr_port1, |
Matthew R. Ochs | 1cd7fab | 2017-04-12 14:14:41 -0500 | [diff] [blame] | 2693 | &dev_attr_port2, |
| 2694 | &dev_attr_port3, |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2695 | &dev_attr_lun_mode, |
| 2696 | &dev_attr_ioctl_version, |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2697 | &dev_attr_port0_lun_table, |
| 2698 | &dev_attr_port1_lun_table, |
Matthew R. Ochs | 1cd7fab | 2017-04-12 14:14:41 -0500 | [diff] [blame] | 2699 | &dev_attr_port2_lun_table, |
| 2700 | &dev_attr_port3_lun_table, |
Matthew R. Ochs | cba06e6 | 2017-04-12 14:13:20 -0500 | [diff] [blame] | 2701 | &dev_attr_irqpoll_weight, |
Matthew R. Ochs | 3065267 | 2017-04-12 14:15:53 -0500 | [diff] [blame^] | 2702 | &dev_attr_num_hwqs, |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2703 | NULL |
| 2704 | }; |
| 2705 | |
| 2706 | /* |
| 2707 | * Device attributes |
| 2708 | */ |
Matthew R. Ochs | e0f01a2 | 2015-10-21 15:12:39 -0500 | [diff] [blame] | 2709 | static DEVICE_ATTR_RO(mode); |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2710 | |
| 2711 | static struct device_attribute *cxlflash_dev_attrs[] = { |
| 2712 | &dev_attr_mode, |
| 2713 | NULL |
| 2714 | }; |
| 2715 | |
| 2716 | /* |
| 2717 | * Host template |
| 2718 | */ |
| 2719 | static struct scsi_host_template driver_template = { |
| 2720 | .module = THIS_MODULE, |
| 2721 | .name = CXLFLASH_ADAPTER_NAME, |
| 2722 | .info = cxlflash_driver_info, |
| 2723 | .ioctl = cxlflash_ioctl, |
| 2724 | .proc_name = CXLFLASH_NAME, |
| 2725 | .queuecommand = cxlflash_queuecommand, |
| 2726 | .eh_device_reset_handler = cxlflash_eh_device_reset_handler, |
| 2727 | .eh_host_reset_handler = cxlflash_eh_host_reset_handler, |
| 2728 | .change_queue_depth = cxlflash_change_queue_depth, |
Manoj N. Kumar | 8343083 | 2016-03-04 15:55:20 -0600 | [diff] [blame] | 2729 | .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2730 | .can_queue = CXLFLASH_MAX_CMDS, |
Matthew R. Ochs | 5fbb96c | 2016-11-28 18:42:19 -0600 | [diff] [blame] | 2731 | .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2732 | .this_id = -1, |
Uma Krishnan | 68ab2d7 | 2016-11-28 18:41:06 -0600 | [diff] [blame] | 2733 | .sg_tablesize = 1, /* No scatter gather support */ |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2734 | .max_sectors = CXLFLASH_MAX_SECTORS, |
| 2735 | .use_clustering = ENABLE_CLUSTERING, |
| 2736 | .shost_attrs = cxlflash_host_attrs, |
| 2737 | .sdev_attrs = cxlflash_dev_attrs, |
| 2738 | }; |
| 2739 | |
| 2740 | /* |
| 2741 | * Device dependent values |
| 2742 | */ |
Uma Krishnan | 96e1b66 | 2016-06-15 18:49:38 -0500 | [diff] [blame] | 2743 | static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, |
| 2744 | 0ULL }; |
| 2745 | static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, |
Uma Krishnan | 704c4b0 | 2016-06-15 18:49:57 -0500 | [diff] [blame] | 2746 | CXLFLASH_NOTIFY_SHUTDOWN }; |
Matthew R. Ochs | 9434452 | 2017-02-16 21:39:32 -0600 | [diff] [blame] | 2747 | static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, |
| 2748 | CXLFLASH_NOTIFY_SHUTDOWN }; |
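/*
 * The second field carries device-dependent flags; only the newer
 * FLASH_GT and BRIARD cards request CXLFLASH_NOTIFY_SHUTDOWN, while
 * CORSA passes 0ULL.
 */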
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2749 | |
| 2750 | /* |
| 2751 | * PCI device binding table |
| 2752 | */ |
| 2753 | static struct pci_device_id cxlflash_pci_table[] = { |
| 2754 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, |
| 2755 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, |
Manoj Kumar | a2746fb | 2015-12-14 15:07:43 -0600 | [diff] [blame] | 2756 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, |
| 2757 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, |
Matthew R. Ochs | 9434452 | 2017-02-16 21:39:32 -0600 | [diff] [blame] | 2758 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, |
| 2759 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, |
Matthew R. Ochs | 1530551 | 2015-10-21 15:12:10 -0500 | [diff] [blame] | 2760 | {} |
| 2761 | }; |
| 2762 | |
| 2763 | MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); |
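/*
 * MODULE_DEVICE_TABLE() exports the ids above as modalias data so
 * userspace (udev/modprobe) can autoload this driver when a matching
 * IBM adapter is discovered on the PCI bus.
 */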
| 2764 | |
| 2765 | /** |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2766 | * cxlflash_worker_thread() - work thread handler for the AFU |
| 2767 | * @work: Work structure embedded in the cxlflash_cfg associated with the host. |
| 2768 | * |
| 2769 | * Handles the following events: |
| 2770 | * - Link reset, which cannot be performed in interrupt context because |
| 2771 | *   it can block for several seconds |
Matthew R. Ochs | ef51074 | 2015-10-21 15:13:37 -0500 | [diff] [blame] | 2772 | * - Rescan the host |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2773 | */ |
| 2774 | static void cxlflash_worker_thread(struct work_struct *work) |
| 2775 | { |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2776 | struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, |
| 2777 | work_q); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2778 | struct afu *afu = cfg->afu; |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 2779 | struct device *dev = &cfg->dev->dev; |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 2780 | __be64 __iomem *fc_port_regs; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2781 | int port; |
| 2782 | ulong lock_flags; |
| 2783 | |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2784 | /* Avoid MMIO if the device has failed */ |
| 2786 | if (cfg->state != STATE_NORMAL) |
| 2787 | return; |
| 2788 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2789 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
| 2790 | |
| 2791 | if (cfg->lr_state == LINK_RESET_REQUIRED) { |
| 2792 | port = cfg->lr_port; |
| 2793 | if (port < 0) { |
Matthew R. Ochs | 4392ba4 | 2015-10-21 15:13:11 -0500 | [diff] [blame] | 2794 | dev_err(dev, "%s: invalid port index %d\n", |
| 2795 | __func__, port); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2796 | } else { |
| 2797 | spin_unlock_irqrestore(cfg->host->host_lock, |
| 2798 | lock_flags); |
| 2799 | |
| 2800 | /* The reset can block... */ |
Matthew R. Ochs | 0aa1488 | 2017-04-12 14:14:17 -0500 | [diff] [blame] | 2801 | fc_port_regs = get_fc_port_regs(cfg, port); |
| 2802 | afu_link_reset(afu, port, fc_port_regs); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2803 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
| 2804 | } |
| 2805 | |
| 2806 | cfg->lr_state = LINK_RESET_COMPLETE; |
| 2807 | } |
| 2808 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2809 | spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); |
Matthew R. Ochs | ef51074 | 2015-10-21 15:13:37 -0500 | [diff] [blame] | 2810 | |
| 2811 | if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) |
| 2812 | scsi_scan_host(cfg->host); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2813 | } |
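/*
 * Presumably queued from atomic contexts elsewhere in this file via
 * schedule_work(&cfg->work_q) after setting lr_state/lr_port or
 * bumping scan_host_needed; the handler then runs in process context,
 * where the link reset is free to block.
 */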
| 2814 | |
| 2815 | /** |
| 2816 | * cxlflash_probe() - PCI entry point to add host |
| 2817 | * @pdev: PCI device associated with the host. |
| 2818 | * @dev_id: PCI device id associated with device. |
| 2819 | * |
Matthew R. Ochs | 323e334 | 2017-04-12 14:14:51 -0500 | [diff] [blame] | 2820 | * The device starts out in a 'probing' state and |
| 2821 | * transitions to the 'normal' state at the end of a successful |
| 2822 | * probe. Should an EEH event occur during probe, the notification |
| 2823 | * thread (error_detected()) will wait until the probe handler |
| 2824 | * is nearly complete. At that time, the device will be moved to |
| 2825 | * a 'probed' state and the EEH thread woken up to drive the slot |
| 2826 | * reset and recovery (device moves to 'normal' state). Meanwhile, |
| 2827 | * the probe will be allowed to exit successfully. |
| 2828 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 2829 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2830 | */ |
| 2831 | static int cxlflash_probe(struct pci_dev *pdev, |
| 2832 | const struct pci_device_id *dev_id) |
| 2833 | { |
| 2834 | struct Scsi_Host *host; |
| 2835 | struct cxlflash_cfg *cfg = NULL; |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2836 | struct device *dev = &pdev->dev; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2837 | struct dev_dependent_vals *ddv; |
| 2838 | int rc = 0; |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2839 | int k; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2840 | |
| 2841 | dev_dbg(dev, "%s: Found CXLFLASH with IRQ: %d\n", |
| 2842 | __func__, pdev->irq); |
| 2843 | |
| 2844 | ddv = (struct dev_dependent_vals *)dev_id->driver_data; |
| 2845 | driver_template.max_sectors = ddv->max_sectors; |
| 2846 | |
| 2847 | host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); |
| 2848 | if (!host) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2849 | dev_err(dev, "%s: scsi_host_alloc failed\n", __func__); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2850 | rc = -ENOMEM; |
| 2851 | goto out; |
| 2852 | } |
| 2853 | |
| 2854 | host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; |
| 2855 | host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2856 | host->unique_id = host->host_no; |
| 2857 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; |
| 2858 | |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2859 | cfg = shost_priv(host); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2860 | cfg->host = host; |
| 2861 | rc = alloc_mem(cfg); |
| 2862 | if (rc) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2863 | dev_err(dev, "%s: alloc_mem failed\n", __func__); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2864 | rc = -ENOMEM; |
Matthew R. Ochs | 8b5b1e8 | 2015-10-21 15:14:09 -0500 | [diff] [blame] | 2865 | scsi_host_put(cfg->host); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2866 | goto out; |
| 2867 | } |
| 2868 | |
| 2869 | cfg->init_state = INIT_STATE_NONE; |
| 2870 | cfg->dev = pdev; |
Matthew R. Ochs | 17ead26 | 2015-10-21 15:15:37 -0500 | [diff] [blame] | 2871 | cfg->cxl_fops = cxlflash_cxl_fops; |
Matthew R. Ochs | 2cb7926 | 2015-08-13 21:47:53 -0500 | [diff] [blame] | 2872 | |
| 2873 | /* |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2874 | * Promoted LUNs move to the top of the LUN table. The rest stay on |
| 2875 | * the bottom half. The bottom half grows from the end (index = 255), |
| 2876 | * whereas the top half grows from the beginning (index = 0). |
| 2877 | * |
| 2878 | * Initialize the last LUN index for all possible ports. |
Matthew R. Ochs | 2cb7926 | 2015-08-13 21:47:53 -0500 | [diff] [blame] | 2879 | */ |
Matthew R. Ochs | 78ae028 | 2017-04-12 14:13:50 -0500 | [diff] [blame] | 2880 | cfg->promote_lun_index = 0; |
| 2881 | |
| 2882 | for (k = 0; k < MAX_FC_PORTS; k++) |
| 2883 | cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS / 2 - 1; |
Matthew R. Ochs | 2cb7926 | 2015-08-13 21:47:53 -0500 | [diff] [blame] | 2884 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2885 | cfg->dev_id = (struct pci_device_id *)dev_id; |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2886 | |
| 2887 | init_waitqueue_head(&cfg->tmf_waitq); |
Matthew R. Ochs | 439e85c | 2015-10-21 15:12:00 -0500 | [diff] [blame] | 2888 | init_waitqueue_head(&cfg->reset_waitq); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2889 | |
| 2890 | INIT_WORK(&cfg->work_q, cxlflash_worker_thread); |
| 2891 | cfg->lr_state = LINK_RESET_INVALID; |
| 2892 | cfg->lr_port = -1; |
Matthew R. Ochs | 0d73122 | 2015-10-21 15:16:24 -0500 | [diff] [blame] | 2893 | spin_lock_init(&cfg->tmf_slock); |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2894 | mutex_init(&cfg->ctx_tbl_list_mutex); |
| 2895 | mutex_init(&cfg->ctx_recovery_mutex); |
Matthew R. Ochs | 0a27ae5 | 2015-10-21 15:11:52 -0500 | [diff] [blame] | 2896 | init_rwsem(&cfg->ioctl_rwsem); |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2897 | INIT_LIST_HEAD(&cfg->ctx_err_recovery); |
| 2898 | INIT_LIST_HEAD(&cfg->lluns); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2899 | |
| 2900 | pci_set_drvdata(pdev, cfg); |
| 2901 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2902 | cfg->cxl_afu = cxl_pci_to_afu(pdev); |
| 2903 | |
| 2904 | rc = init_pci(cfg); |
| 2905 | if (rc) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2906 | dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2907 | goto out_remove; |
| 2908 | } |
| 2909 | cfg->init_state = INIT_STATE_PCI; |
| 2910 | |
| 2911 | rc = init_afu(cfg); |
Matthew R. Ochs | 323e334 | 2017-04-12 14:14:51 -0500 | [diff] [blame] | 2912 | if (rc && !wq_has_sleeper(&cfg->reset_waitq)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2913 | dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2914 | goto out_remove; |
| 2915 | } |
| 2916 | cfg->init_state = INIT_STATE_AFU; |
| 2917 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2918 | rc = init_scsi(cfg); |
| 2919 | if (rc) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2920 | dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2921 | goto out_remove; |
| 2922 | } |
| 2923 | cfg->init_state = INIT_STATE_SCSI; |
| 2924 | |
Matthew R. Ochs | 323e334 | 2017-04-12 14:14:51 -0500 | [diff] [blame] | 2925 | if (wq_has_sleeper(&cfg->reset_waitq)) { |
| 2926 | cfg->state = STATE_PROBED; |
| 2927 | wake_up_all(&cfg->reset_waitq); |
| 2928 | } else { |
| 2929 | cfg->state = STATE_NORMAL; |
|      | } |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2930 | out: |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2931 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 2932 | return rc; |
| 2933 | |
| 2934 | out_remove: |
| 2935 | cxlflash_remove(pdev); |
| 2936 | goto out; |
| 2937 | } |
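/*
 * Note: each failure path above funnels through cxlflash_remove(),
 * which consults cfg->init_state (NONE/PCI/AFU/SCSI) to unwind only
 * the stages that completed.
 */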
| 2938 | |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2939 | /** |
| 2940 | * cxlflash_pci_error_detected() - called when a PCI error is detected |
| 2941 | * @pdev: PCI device struct. |
| 2942 | * @state: PCI channel state. |
| 2943 | * |
Matthew R. Ochs | 1d3324c | 2016-09-02 15:39:30 -0500 | [diff] [blame] | 2944 | * When an EEH occurs during an active reset, wait until the reset is |
| 2945 | * complete and then take action based upon the device state. |
| 2946 | * |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2947 | * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT |
| 2948 | */ |
| 2949 | static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, |
| 2950 | pci_channel_state_t state) |
| 2951 | { |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2952 | int rc = 0; |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2953 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
| 2954 | struct device *dev = &cfg->dev->dev; |
| 2955 | |
| 2956 | dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); |
| 2957 | |
| 2958 | switch (state) { |
| 2959 | case pci_channel_io_frozen: |
Matthew R. Ochs | 323e334 | 2017-04-12 14:14:51 -0500 | [diff] [blame] | 2960 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && |
| 2961 | cfg->state != STATE_PROBING); |
Matthew R. Ochs | 1d3324c | 2016-09-02 15:39:30 -0500 | [diff] [blame] | 2962 | if (cfg->state == STATE_FAILTERM) |
| 2963 | return PCI_ERS_RESULT_DISCONNECT; |
| 2964 | |
Matthew R. Ochs | 439e85c | 2015-10-21 15:12:00 -0500 | [diff] [blame] | 2965 | cfg->state = STATE_RESET; |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2966 | scsi_block_requests(cfg->host); |
Matthew R. Ochs | 0a27ae5 | 2015-10-21 15:11:52 -0500 | [diff] [blame] | 2967 | drain_ioctls(cfg); |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2968 | rc = cxlflash_mark_contexts_error(cfg); |
| 2969 | if (unlikely(rc)) |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 2970 | dev_err(dev, "%s: Failed to mark user contexts rc=%d\n", |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 2971 | __func__, rc); |
Manoj N. Kumar | 9526f36 | 2016-03-25 14:26:34 -0500 | [diff] [blame] | 2972 | term_afu(cfg); |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2973 | return PCI_ERS_RESULT_NEED_RESET; |
| 2974 | case pci_channel_io_perm_failure: |
| 2975 | cfg->state = STATE_FAILTERM; |
Matthew R. Ochs | 439e85c | 2015-10-21 15:12:00 -0500 | [diff] [blame] | 2976 | wake_up_all(&cfg->reset_waitq); |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 2977 | scsi_unblock_requests(cfg->host); |
| 2978 | return PCI_ERS_RESULT_DISCONNECT; |
| 2979 | default: |
| 2980 | break; |
| 2981 | } |
| 2982 | return PCI_ERS_RESULT_NEED_RESET; |
| 2983 | } |
| 2984 | |
| 2985 | /** |
| 2986 | * cxlflash_pci_slot_reset() - called when PCI slot has been reset |
| 2987 | * @pdev: PCI device struct. |
| 2988 | * |
| 2989 | * This routine is called by the pci error recovery code after the PCI |
| 2990 | * slot has been reset, just before we should resume normal operations. |
| 2991 | * |
| 2992 | * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT |
| 2993 | */ |
| 2994 | static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) |
| 2995 | { |
| 2996 | int rc = 0; |
| 2997 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
| 2998 | struct device *dev = &cfg->dev->dev; |
| 2999 | |
| 3000 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); |
| 3001 | |
| 3002 | rc = init_afu(cfg); |
| 3003 | if (unlikely(rc)) { |
Matthew R. Ochs | fb67d44 | 2017-01-11 19:19:47 -0600 | [diff] [blame] | 3004 | dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc); |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 3005 | return PCI_ERS_RESULT_DISCONNECT; |
| 3006 | } |
| 3007 | |
| 3008 | return PCI_ERS_RESULT_RECOVERED; |
| 3009 | } |
| 3010 | |
| 3011 | /** |
| 3012 | * cxlflash_pci_resume() - called when normal operation can resume |
| 3013 | * @pdev: PCI device struct |
| 3014 | */ |
| 3015 | static void cxlflash_pci_resume(struct pci_dev *pdev) |
| 3016 | { |
| 3017 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
| 3018 | struct device *dev = &cfg->dev->dev; |
| 3019 | |
| 3020 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); |
| 3021 | |
| 3022 | cfg->state = STATE_NORMAL; |
Matthew R. Ochs | 439e85c | 2015-10-21 15:12:00 -0500 | [diff] [blame] | 3023 | wake_up_all(&cfg->reset_waitq); |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 3024 | scsi_unblock_requests(cfg->host); |
| 3025 | } |
| 3026 | |
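/*
 * EEH recovery sequence: error_detected() quiesces the adapter and
 * returns NEED_RESET, the PCI core resets the slot and invokes
 * slot_reset() to bring the AFU back, and resume() re-enables
 * blocked SCSI traffic.
 */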
| 3027 | static const struct pci_error_handlers cxlflash_err_handler = { |
| 3028 | .error_detected = cxlflash_pci_error_detected, |
| 3029 | .slot_reset = cxlflash_pci_slot_reset, |
| 3030 | .resume = cxlflash_pci_resume, |
| 3031 | }; |
| 3032 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 3033 | /* |
| 3034 | * PCI device structure |
| 3035 | */ |
| 3036 | static struct pci_driver cxlflash_driver = { |
| 3037 | .name = CXLFLASH_NAME, |
| 3038 | .id_table = cxlflash_pci_table, |
| 3039 | .probe = cxlflash_probe, |
| 3040 | .remove = cxlflash_remove, |
Uma Krishnan | babf985 | 2016-09-02 15:39:16 -0500 | [diff] [blame] | 3041 | .shutdown = cxlflash_remove, |
Matthew R. Ochs | 5cdac81 | 2015-08-13 21:47:34 -0500 | [diff] [blame] | 3042 | .err_handler = &cxlflash_err_handler, |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 3043 | }; |
| 3044 | |
| 3045 | /** |
| 3046 | * init_cxlflash() - module entry point |
| 3047 | * |
Matthew R. Ochs | 1284fb0 | 2015-10-21 15:14:40 -0500 | [diff] [blame] | 3048 | * Return: 0 on success, -errno on failure |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 3049 | */ |
| 3050 | static int __init init_cxlflash(void) |
| 3051 | { |
Matthew R. Ochs | cd41e18 | 2017-04-12 14:15:11 -0500 | [diff] [blame] | 3052 | check_sizes(); |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 3053 | cxlflash_list_init(); |
| 3054 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 3055 | return pci_register_driver(&cxlflash_driver); |
| 3056 | } |
| 3057 | |
| 3058 | /** |
| 3059 | * exit_cxlflash() - module exit point |
| 3060 | */ |
| 3061 | static void __exit exit_cxlflash(void) |
| 3062 | { |
Matthew R. Ochs | 65be2c7 | 2015-08-13 21:47:43 -0500 | [diff] [blame] | 3063 | cxlflash_term_global_luns(); |
| 3064 | cxlflash_free_errpage(); |
| 3065 | |
Matthew R. Ochs | c21e0bb | 2015-06-09 17:15:52 -0500 | [diff] [blame] | 3066 | pci_unregister_driver(&cxlflash_driver); |
| 3067 | } |
| 3068 | |
| 3069 | module_init(init_cxlflash); |
| 3070 | module_exit(exit_cxlflash); |