/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
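	/*
	 * The command IOCB itself carries three DSDs; each Continuation
	 * Type 0 IOCB carries seven more, so round the remainder up.
	 */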
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
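	/*
	 * The command IOCB itself carries two DSDs; each Continuation
	 * Type 1 IOCB carries five more, so round the remainder up.
	 */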
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
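	/* Handle 0 is reserved; the search below wraps from the end back to 1. */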
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
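	/*
	 * If the cached free count looks too small, recompute it from the
	 * hardware OUT pointer; a two-entry cushion is kept so the ring is
	 * never filled completely.
	 */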
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	} else {
		cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue holding the staged IOCBs
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
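		/*
		 * Each doorbell write below is chased with a read so the
		 * update is not left sitting in a PCI write buffer.
		 */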
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

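	/* Markers are always queued on the base (queue 0) request queue. */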
	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held, as specified by the ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

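	/*
	 * Carve the scatterlist into external DSD lists of up to
	 * QLA_DSDS_PER_IOCB entries each; the first list is referenced
	 * from the IOCB itself, each later list from the tail entry of
	 * the previous one.
	 */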
	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT */
	uint32_t		dma_len;	/* OUT */

	uint32_t		tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t		bytes_consumed;
	uint32_t		num_bytes;
	uint32_t		tot_partial;

	/* for debugging */
	uint32_t		num_sg;
	srb_t			*sp;
};

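/*
 * Step through the data scatterlist one protection interval at a time.
 * Returns 1 with sgx->dma_addr/dma_len describing the next DMA chunk
 * (*partial is set when the chunk ends mid-interval), or 0 once all of
 * sgx->tot_bytes have been consumed.
 */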
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

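/*
 * Build the data DSD list for the non-bundled DIF case: after each full
 * protection interval of data, an 8-byte DIF tuple from the protection
 * scatterlist is interleaved into the same list.
 */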
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;
	struct scsi_qla_host *vha;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		vha = sp->fcport->vha;
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		vha = tc->vha;
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

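/*
 * Build the data DSD list from either an initiator command (sp) or a
 * target-mode command (tc); the DSDs live in externally allocated lists
 * chained from the IOCB.
 */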
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
		vha = sp->fcport->vha;
	} else if (tc) {
		sgl = tc->sg;
		vha = tc->vha;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

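/*
 * Build the DSD list for the protection-data scatterlist (the DIF side
 * of a bundled transfer); the structure mirrors
 * qla24xx_walk_and_build_sglist() above.
 */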
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->fcport->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
		"%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of data segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to pass to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	int sgc;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;
	char tag[2];

	cmd = GET_CMD_SP(sp);

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = TSK_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = TSK_SIMPLE;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
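	/* Each protection interval carries an 8-byte T10 PI tuple. */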
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling if we need to fetch interleaving
		 * protection PCI accesses.
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
			tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |=
		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
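	/*
	 * Handle 0 is reserved; scan circularly from the most recently used
	 * handle for a free slot in the outstanding-command array.
	 */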
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
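	/*
	 * If the cached free count looks too small, refresh it from the
	 * firmware's queue-out pointer (shadow copy when supported) and
	 * recompute the wrap-aware distance between the in and out
	 * indexes; two entries are always kept in reserve.
	 */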
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		default:
			cmd_pkt->task = TSK_SIMPLE;
			break;
		}
	} else {
		cmd_pkt->task = TSK_SIMPLE;
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

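		/*
		 * When the HBA inserts or strips protection data, each
		 * logical block must sit in its own data segment, so
		 * recount the segments at sector granularity.
		 */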
		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}


static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

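	/*
	 * With CPU affinity enabled, steer the completion to the response
	 * queue associated with the submitting CPU; otherwise, or when the
	 * CPU is out of range, fall back to the base response queue.
	 */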
	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}

/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */
void *
qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
{
	if (qla2x00_reset_active(vha))
		return NULL;

	return qla2x00_alloc_iocbs(vha, sp);
}

void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

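	/*
	 * A NULL sp means the caller (e.g. a marker IOCB) only needs ring
	 * space, not an outstanding-command handle.
	 */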
	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

queuing_error:
	return pkt;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}

static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	uint64_t lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
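	/*
	 * Give the firmware 2 * R_A_TOV for the task-management operation
	 * (r_a_tov scaled to the IOCB's time base, then doubled).
	 */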
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}

static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));

	sp->fcport->vha->qla_stats.control_requests++;
}

static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;

	sp->fcport->vha->qla_stats.control_requests++;
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

/*
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

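	/*
	 * Segment-heavy commands go out as Command Type 6 IOCBs, which
	 * reference an external FCP_CMND buffer and chained DSD lists;
	 * everything else uses the inline Command Type 7 format.
	 */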
	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* build FCP_CMND IU */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;
		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;

	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
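	/*
	 * The 82xx doorbell is posted with a memory write; poll the read
	 * pointer until the chip echoes the value back, re-posting as
	 * needed, so the index update is known to have been consumed.
	 */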
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;

	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
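	/*
	 * 'handle' identifies this abort IOCB itself; 'handle_to_abort'
	 * names the outstanding command being cancelled.
	 */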
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(req->id);
	/* Send the command to the firmware */
	wmb();
}

int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
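	/*
	 * Dispatch on SRB type: FWI-2 capable ISPs take the 24xx IOCB
	 * formats, older parts fall back to the mailbox-entry variants,
	 * and ISPFx00 has its own encodings.
	 */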
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}

static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidir command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction; here both flags are set.
	 * Also set the BD_WRAP_BACK flag; the firmware takes care of
	 * assigning DID=SID for outgoing pkts.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags =
	    __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one dsd is available for a bidirectional IOCB; the remaining
	 * dsds are bundled in continuation iocbs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDS
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Read-request DSDs always go to a continuation IOCB and follow
	 * the write DSDs. If there is room on the current IOCB it is
	 * used; otherwise a new continuation IOCB is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDS
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should be same as number of IOCB required for this cmd */
	cmd_pkt->entry_count = entry_count;
}
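
/*
 * Illustrative sketch (not part of the driver): the accounting above
 * means a bidirectional command with tot_dsds data segments needs one
 * command IOCB (holding 1 DSD) plus ceil((tot_dsds - 1) / 5)
 * Continuation Type 1 IOCBs (5 DSDs each). A hypothetical helper:
 *
 *	static uint16_t qla2xxx_bidir_iocbs(uint16_t tot_dsds)
 *	{
 *		uint16_t iocbs = 1;
 *
 *		if (tot_dsds > 1)
 *			iocbs += DIV_ROUND_UP(tot_dsds - 1, 5);
 *		return iocbs;
 *	}
 *
 * For example, 11 segments -> 1 + ceil(10 / 5) = 3, matching the
 * entry_count the two for_each_sg() loops above arrive at.
 */

/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the firmware.
 * @sp: SRB carrying the bsg_job
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Sends a marker if one is pending, claims an outstanding-command slot
 * and request-ring space under the hardware lock, builds the
 * bidirectional IOCB and rings the doorbell.
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY when no handle or
 * ring space is free, or EXT_STATUS_MAILBOX when the marker fails.
 */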
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required: after a reset or loop event the
	 * firmware must see a marker IOCB before any new commands.
	 */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
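	/*
	 * The search starts just past the last handle issued and wraps
	 * at num_outstanding_cmds; note that handle 0 is never selected.
	 */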
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate the number of IOCBs required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
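	/*
	 * Free space on the circular ring: if the out (consumer) index
	 * is ahead of ring_index the gap is direct; otherwise it wraps.
	 * For example, length = 2048, ring_index = 2000, out = 100 gives
	 * 2048 - (2000 - 100) = 148 free entries.
	 */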
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
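	/* The handle packs the request-queue id alongside the array index
	 * so the completion path can locate both the queue and the srb.
	 */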
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Track the command as outstanding and consume ring space. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}