/*
 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>

#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"

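/*
 * snic_wq_cmpl_frame_send : per-buffer WQ completion callback, invoked
 * through svnic_wq_service() for each acked descriptor. Unmaps the
 * request buffer and drops the os_buf reference.
 */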
static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			struct cq_desc *cq_desc,
			struct vnic_wq_buf *buf,
			void *opaque)
{
	struct snic *snic = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(snic->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	SNIC_TRC(snic->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);
	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
	buf->os_buf = NULL;
}

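/*
 * snic_wq_cmpl_handler_cont : services one WQ completion under wq_lock.
 * Passed to svnic_cq_service() as the per-descriptor continuation.
 */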
static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *snic = svnic_dev_priv(vdev);
	unsigned long flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	svnic_wq_service(&snic->wq[q_num],
			 cq_desc,
			 cmpl_idx,
			 snic_wq_cmpl_frame_send,
			 NULL);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	return 0;
} /* end of snic_wq_cmpl_handler_cont */

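/*
 * snic_wq_cmpl_handler : processes acks on all WQ completion queues,
 * servicing up to work_to_do entries per queue. Returns the number of
 * completions serviced.
 */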
int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int work_done = 0;
	unsigned int i;

	snic->s_stats.misc.last_ack_time = jiffies;
	for (i = 0; i < snic->wq_count; i++) {
		work_done += svnic_cq_service(&snic->cq[i],
					      work_to_do,
					      snic_wq_cmpl_handler_cont,
					      NULL);
	}

	return work_done;
} /* end of snic_wq_cmpl_handler */

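/*
 * snic_free_wq_buf : frees a WQ buffer that was never acked, typically
 * during queue cleanup. Unmaps the request, unlinks it from
 * spl_cmd_list if present, and releases the associated snic_req_info.
 */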
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* fires if never added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
	return;
}

/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
	/* No multi queue support for now */
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}

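/*
 * snic_queue_wq_desc : maps the request buffer and posts it to the
 * selected work queue. Returns 0 on success, -ENOMEM if the DMA
 * mapping fails or no WQ descriptor is available.
 */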
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	long act_reqs;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	if (!svnic_wq_desc_avail(&snic->wq[q_num])) {
		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	/* Update stats */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */

/*
 * snic_handle_untagged_req : Adds an snic-specific request to
 * spl_cmd_list so it can be cleaned up during driver unload.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	INIT_LIST_HEAD(&rqi->list);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);
		return rqi;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	if (sg_cnt == 0)
		goto end;

	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

	return rqi;
} /* end of snic_req_init */

/*
 * snic_abort_req_init : Inits abort request.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	/* If an abort is issued a second time, reuse the earlier request */
	if (rqi->abort_req)
		return rqi->abort_req;

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	rqi->abort_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_abort_req_init */

/*
 * snic_dr_req_init : Inits device reset req
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	SNIC_BUG_ON(rqi->dr_req != NULL);
	rqi->dr_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_dr_req_init */

/* frees snic_req_info and snic_host_req */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->abort_req)
		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	if (rqi->dr_req)
		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}

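/* Unmaps the DMA-mapped response buffer described by the first SG entry */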
void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	pci_unmap_single(snic->pdev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 PCI_DMA_FROMDEVICE);
}

/*
 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
	struct snic_req_info *rqi;
	struct list_head *cur, *nxt;
	unsigned long flags;

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
		rqi = list_entry(cur, struct snic_req_info, list);
		list_del_init(&rqi->list);
		if (rqi->sge_va) {
			snic_pci_unmap_rsp_buf(snic, rqi);
			kfree((void *)rqi->sge_va);
			rqi->sge_va = 0;
		}

		snic_req_free(snic, rqi);
	}
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		goto end;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
	snic_req_free(snic, rqi);

end:
	return;
}

/* dump buf in hex fmt */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}

#define LINE_BUFSZ 128 /* line buffer for snic_dump_desc() */
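/*
 * snic_dump_desc : logs a one-line summary of a host request or fw
 * response descriptor; optionally hex-dumps the raw bytes.
 */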
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Set the 0x20 bit in snic_log_level to dump the raw byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */

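/* Logs the descriptor only when SNIC_DESC_LOGGING is enabled */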
void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (snic_log_level & SNIC_DESC_LOGGING)
		snic_dump_desc(fn, os_buf, len);
}

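/*
 * snic_calc_io_process_time : tracks the longest observed I/O
 * processing time (in jiffies) in the per-host stats.
 */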
void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 duration;

	duration = jiffies - rqi->start_time;

	if (duration > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, duration);
}