/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include "qedf.h"
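/*
 * qedf_initiate_els() - Build and post a middle-path ELS request.
 *
 * Allocates a QEDF_ELS command for @fcport, copies the caller-supplied
 * payload into the request buffer, fills in the FC header (local SID to
 * remote DID), initializes the firmware middle-path task and rings the SQ
 * doorbell. @cb_func is invoked from qedf_process_els_compl() when the
 * response CQE arrives; a nonzero @timer_msec arms the command timer.
 * See qedf_send_rrq() below for a typical caller.
 *
 * Return: 0 on success, negative errno otherwise.
 */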
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	uint32_t start_time = jiffies / HZ;
	uint32_t current_time;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

retry_els:
	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		current_time = jiffies / HZ;
		if ((current_time - start_time) > 10) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				   "els: Failed els 0x%x\n", op);
			rc = -ENOMEM;
			goto els_err;
		}
		/* Poll every 20ms; the elapsed-time check above bounds the
		 * retries to roughly 10 seconds.
		 */
		mdelay(20);
		goto retry_els;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		   els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	} else {
		rc = 0;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;
	/* __fc_fill_fc_hdr() takes the destination ID first; the request is
	 * addressed to the remote port (did) from our local port (sid).
	 */
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		   "req\n");
	qedf_ring_doorbell(fcport);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}
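/*
 * qedf_process_els_compl() - Handle a firmware completion for an ELS request.
 *
 * Cancels the ELS timer, records the response length reported in the
 * midpath CQE, and invokes the callback registered by qedf_initiate_els()
 * before dropping the command reference.
 */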
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	uint16_t xid;
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	xid = els_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = els_req->sc_cmd;

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}
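/*
 * qedf_rrq_compl() - Completion callback for the RRQ ELS.
 *
 * RRQ is sent after an abort to reclaim the exchange, so completion (or
 * timeout) is where the reference held on the aborted I/O request is
 * dropped, returning it to the command pool.
 */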
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/* This should return the aborted io_req to the command pool */
	if (orig_io_req)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	kfree(cb_arg);
}
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "abort_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			  "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}
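/*
 * qedf_process_l2_frame_compl() - Return an offloaded ELS response to libfc.
 *
 * Rebuilds the frame so it looks like a normal L2 receive: restores the
 * OX_ID libfc assigned, fixes up the header fields and frame attributes,
 * and passes the frame to fc_exch_recv().
 */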
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					 struct fc_frame *fp,
					 u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;

	if (!fcport)
		return;

	rdata = fcport->rdata;
	if (rdata) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata)
			fc_rport_login(rdata);
	}
}
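/*
 * qedf_l2_els_compl() - Completion callback for libfc-originated ELS
 * commands (currently ADISC) that were sent over the offload path.
 *
 * Copies the firmware's response header and payload into a newly allocated
 * struct fc_frame and hands it back to libfc via
 * qedf_process_l2_frame_compl(). Timeouts are left to libfc to clean up,
 * except that a timed-out ADISC forces an rport restart.
 */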
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then libfc
	 * timeout the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded. Force libfc to logout the session, which
		 * uploads the offloaded connection and allows the PLOGI
		 * response to flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}
	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
		    "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		return;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}
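/*
 * qedf_send_adisc() - Send a libfc-generated ADISC over the offloaded
 * connection, saving libfc's OX_ID in the callback argument so the response
 * can be completed on the exchange libfc expects.
 */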
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}
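/*
 * qedf_srr_compl() - Completion callback for SRR (Sequence Retransmission
 * Request).
 *
 * On LS_ACC the target has accepted the retransmission request and recovery
 * proceeds on the original exchange; on LS_RJT the original I/O is aborted.
 * In either case QEDF_CMD_SRR_SENT is cleared and the reference taken in
 * qedf_send_srr() is dropped.
 */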
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;
	/* Bail out here; the shared out_free path dereferences orig_io_req. */
	if (!orig_io_req) {
		kfree(cb_arg);
		return;
	}
	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_free;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_free;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_free:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}
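/*
 * qedf_send_srr() - Send an FCP SRR asking the target to retransmit,
 * starting at @offset with the given @r_ctl.
 *
 * Takes a reference on @orig_io_req that is dropped in qedf_srr_compl(),
 * and sets QEDF_CMD_SRR_SENT while the SRR is outstanding. If the SRR
 * cannot be queued, the original I/O is aborted instead.
 */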
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 sid, r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	return rc;
}
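/*
 * qedf_initiate_seq_cleanup() - Post a firmware sequence-recovery task for
 * @orig_io_req starting at @offset.
 *
 * On completion, qedf_process_seq_cleanup_compl() follows up with an SRR
 * built from the offset and r_ctl stashed in the callback argument.
 */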
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Doing sequence cleanup for xid=0x%x offset=%u.\n",
	    orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			  "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
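/*
 * qedf_process_seq_cleanup_compl() - Handle completion of a sequence-recovery
 * task: cancel the cleanup timer and send the follow-up SRR, or simply free
 * resources if the cleanup timed out.
 */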
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
		goto free;

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
		    "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
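/*
 * qedf_requeue_io_req() - Reissue the SCSI command of @orig_io_req on a new
 * exchange without completing it back to the SCSI layer.
 *
 * Used for the "command lost" case, where a REC reject indicates the target
 * never saw the original exchange. The original I/O is aborted (without a
 * SCSI completion) once the command has been reissued on a new OX_ID.
 */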
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
		    "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
		    "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}
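/*
 * qedf_rec_compl() - Completion callback for REC (Read Exchange Concise).
 *
 * LS_RJT with an OX_ID/RX_ID explanation means the target never saw the
 * command, so it is requeued on a new exchange. For LS_ACC, the exchange
 * status and offsets decide the recovery step: an SRR for a lost response,
 * or a sequence-cleanup task (itself followed by an SRR) for lost data or
 * XFER_RDY frames.
 */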
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;
	/* Bail out here; the shared out_free path dereferences orig_io_req. */
	if (!orig_io_req) {
		kfree(cb_arg);
		return;
	}
	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_free;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_free;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange. We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_free:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
		   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
		   orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}