/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"

#include <scsi/scsi_tcq.h>

/**
 * qla4xxx_get_req_pkt - returns a valid entry in the request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 **/
int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			struct queue_entry **queue_entry)
{
	uint16_t request_in;
	uint8_t status = QLA_SUCCESS;

	*queue_entry = ha->request_ptr;

	/* get the latest request_in and request_out index */
	request_in = ha->request_in;
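	/*
	 * request_out is read from the shadow register, a copy the firmware
	 * maintains in host memory, which avoids a slow read across the
	 * PCI bus.
	 */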
	ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);

	/* Advance request queue pointer and check for queue full */
	if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		request_in++;
		ha->request_ptr++;
	}

	/* request queue is full, try again later */
	if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
		/* restore request pointer */
		ha->request_ptr = *queue_entry;
		status = QLA_ERROR;
	} else {
		ha->request_in = request_in;
		memset(*queue_entry, 0, sizeof(**queue_entry));
	}

	return status;
}

/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 *
 * This routine issues a marker IOCB.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, int lun)
{
	struct marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
	int_to_scsilun(lun, &marker_entry->lun);
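	/* Make sure the marker IOCB is written to memory before the
	 * doorbell write below makes it visible to the ISP. */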
	wmb();

	/* Tell ISP it's got a new I/O request */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}

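/**
 * qla4xxx_alloc_cont_entry - allocates a continuation entry from the ring
 * @ha: Pointer to host adapter structure.
 *
 * Hands out the slot at the current request_ptr, advances the ring, and
 * initializes the entry header.  Callers must already have verified that
 * enough queue space exists; this helper performs no full-queue check.
 **/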
struct continuation_t1_entry *qla4xxx_alloc_cont_entry(
	struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}

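/**
 * qla4xxx_calc_request_entries - number of IOCBs needed for a command
 * @dsds: number of data segment descriptors to be sent
 *
 * The command IOCB itself holds up to COMMAND_SEG descriptors; each
 * additional continuation IOCB holds up to CONTINUE_SEG more.
 **/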
uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > COMMAND_SEG) {
		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}

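/**
 * qla4xxx_build_scsi_iocbs - fills in the data segments of a command IOCB
 * @srb: Pointer to SCSI Request Block
 * @cmd_entry: Pointer to the command IOCB being built
 * @tot_dsds: total number of mapped data segments
 *
 * Copies the DMA address/length pairs into the command entry, allocating
 * continuation entries from the ring whenever the current entry is full.
 **/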
void qla4xxx_build_scsi_iocbs(struct srb *srb,
			      struct command_t3_entry *cmd_entry,
			      uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;

	cmd = srb->cmd;
	ha = srb->ha;

	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *)&cmd_entry->dataseg[0];

	/* Load data segments */
	if (cmd->use_sg) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t sle_dma;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				struct continuation_t1_entry *cont_entry;

				cont_entry = qla4xxx_alloc_cont_entry(ha);
				cur_dsd = (struct data_seg_a64 *)
					  &cont_entry->dataseg[0];
				avail_dsds = CONTINUE_SEG;
			}

			sle_dma = sg_dma_address(cur_seg);
			cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
			cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
			cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_dsd++;
			cur_seg++;
		}
	} else {
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
		cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	struct scatterlist *sg = NULL;

	uint16_t tot_dsds;
	uint16_t req_cnt;

	unsigned long flags;
	uint16_t cnt;
	uint32_t index;
	char tag[2];

	/* Get pointer to the device database entry */
	ddb_entry = srb->ddb;

	/* Send marker(s) if needed. */
	if (ha->marker_needed == 1) {
		if (qla4xxx_send_marker_iocb(ha, ddb_entry,
					     cmd->device->lun) != QLA_SUCCESS)
			return QLA_ERROR;

		ha->marker_needed = 0;
	}
	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

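	/* The block-layer tag is used as the IOCB handle; the firmware
	 * echoes it back in the completion status entry so the driver can
	 * locate the command again. */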
	index = (uint32_t)cmd->request->tag;

	/* Calculate the number of request entries needed. */
	if (cmd->use_sg) {
		sg = (struct scatterlist *)cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
				      cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
					 cmd->request_bufflen,
					 cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		srb->dma_handle = req_dma;
		tot_dsds = 1;
	}
	req_cnt = qla4xxx_calc_request_entries(tot_dsds);

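	/* If the cached free-entry count looks too small, refresh it from
	 * the firmware's shadow copy of the out pointer before deciding
	 * the ring is full. */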
	if (ha->req_q_count < (req_cnt + 2)) {
		cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
				(ha->request_in - cnt);
	}

	if (ha->req_q_count < (req_cnt + 2))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
	cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;
	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits only if there is data to be
	 * transferred, as the data direction bit is sometimes filled in
	 * when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (cmd->request_bufflen) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;
	if (scsi_populate_tag_msg(cmd, tag))
		switch (tag[0]) {
		case MSG_HEAD_TAG:
			cmd_entry->control_flags |= CF_HEAD_TAG;
			break;
		case MSG_ORDERED_TAG:
			cmd_entry->control_flags |= CF_ORDERED_TAG;
			break;
		}

	/* Advance request queue pointer */
	ha->request_in++;
	if (ha->request_in == REQUEST_QUEUE_DEPTH) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else
		ha->request_ptr++;

	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
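	/* Ensure the command IOCB and any continuation entries reach
	 * memory before the request-in doorbell is rung below. */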
	wmb();

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, retrieving
	 * garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	srb->cmd->host_scribble = (unsigned char *)srb;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	/* Tell ISP it's got a new I/O request */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:

	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
			     cmd->sc_data_direction);
	} else if (tot_dsds)
		pci_unmap_single(ha->pdev, srb->dma_handle,
				 cmd->request_bufflen, cmd->sc_data_direction);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}