/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
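
/*
 * Worked example: the command IOCB holds the first 3 DSDs and each
 * Continuation Type 0 IOCB holds 7 more, so for dsds == 12 we get
 * iocbs = 1 + (12 - 3) / 7 = 2, plus one more for the remainder
 * ((12 - 3) % 7 == 2), giving 3 entries total.
 */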

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
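
/*
 * Worked example: the 64-bit command IOCB holds only 2 DSDs and each
 * Continuation Type 1 IOCB holds 5, so dsds == 12 needs
 * iocbs = 1 + (12 - 2) / 5 = 3 entries with no remainder.
 */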

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to place the IOCB on
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
		return 0;
	}

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
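
/*
 * Hypothetical caller sketch (the surrounding caller is assumed, not
 * taken from this file): a DIF-aware start_scsi path would collect the
 * firmware protection opcode and the protection SG count like so:
 *
 *	uint16_t fw_prot_opts = 0;
 *	int nprot = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 *
 * nprot is then the number of protection data segments to map, while
 * fw_prot_opts carries the PO_MODE_DIF_* mode for the CRC_2 IOCB.
 */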

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}
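
	/*
	 * Worked example for the free-slot computation above: with a
	 * request ring of length 128, ring_index == 120 and a hardware
	 * out-pointer of cnt == 10, the producer has wrapped past the
	 * consumer, so req->cnt = 128 - (120 - 10) = 18 entries remain.
	 */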

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
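
/*
 * Worked example: a Command Type 7 IOCB embeds a single DSD and each
 * Continuation Type 1 IOCB adds 5, so dsds == 7 yields
 * iocbs = 1 + (7 - 1) / 5 = 2, plus one more for the remainder
 * ((7 - 1) % 5 == 1), i.e. 3 entries.
 */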

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
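
/*
 * Worked example: each DSD list holds QLA_DSDS_PER_IOCB entries, so
 * dsds == 2 * QLA_DSDS_PER_IOCB + 1 needs three lists: two full ones
 * plus one for the single remaining descriptor. As seen in
 * qla24xx_build_scsi_type_6_iocbs(), a list occupies
 * (avail_dsds + 1) * QLA_DSD_SIZE bytes; the extra slot carries the
 * chain entry to the next list, or the null terminator at the end.
 */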


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
		    0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	ql_dbg(ql_dbg_io, vha, 0x3009,
	    "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
	    "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
	    pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
	    scsi_get_prot_type(cmd), cmd);
}

struct qla2_sgx {
	dma_addr_t dma_addr;	/* OUT */
	uint32_t dma_len;	/* OUT */

	uint32_t tot_bytes;	/* IN */
	struct scatterlist *cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t bytes_consumed;
	uint32_t num_bytes;
	uint32_t tot_partial;

	/* for debugging */
	uint32_t num_sg;
	srb_t *sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
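
/*
 * Worked example: with blk_sz == 512 and a mapped SG list of 300 and
 * 724 bytes (1024 bytes total), successive calls yield chunks of 300
 * (*partial = 1, the block straddles the SG boundary), 212
 * (*partial = 0, first block complete) and 512 (*partial = 0, second
 * block complete); the fourth call returns 0 because num_bytes has
 * reached tot_bytes.
 */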

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint32_t prot_int;
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	uint8_t *cp;

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x300b,
			    "User data buffer=%p for cmd=%p.\n", cp, cmd);
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd,
    uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	uint8_t *cp;

	cmd = GET_CMD_SP(sp);
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			ql_dbg(ql_dbg_io, vha, 0x3027,
			    "%s(): %p, sg_entry %d - "
			    "addr=0x%x0x%x, len=%d.\n",
			    __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x3028,
			    "%s(): Protection Data buffer = %p.\n", __func__,
			    cp);
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments carrying protection data
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	int sgc;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;
	char tag[2];

	cmd = GET_CMD_SP(sp);

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = 0;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}
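
	/*
	 * Worked example: with a 512-byte sector and data_bytes == 4096,
	 * dif_bytes = (4096 / 512) * 8 = 64 bytes of DIF for eight blocks.
	 * For READ_INSERT/WRITE_STRIP, total_bytes (the Fibre Channel
	 * byte count below) stays 4096 while data_bytes (the CRC context
	 * byte count) grows to 4160; for the remaining modes the wire
	 * count itself becomes 4096 + 64 = 4160.
	 */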

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001449
1450/**
1451 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1452 * @sp: command to send to the ISP
1453 *
Bjorn Helgaascc3ef7b2008-09-11 21:22:51 -07001454 * Returns non-zero if a failure occurred, else zero.
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001455 */
1456int
1457qla24xx_start_scsi(srb_t *sp)
1458{
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001459 int ret, nseg;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001460 unsigned long flags;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001461 uint32_t *clr_ptr;
1462 uint32_t index;
1463 uint32_t handle;
1464 struct cmd_type_7 *cmd_pkt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001465 uint16_t cnt;
1466 uint16_t req_cnt;
1467 uint16_t tot_dsds;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001468 struct req_que *req = NULL;
1469 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001470 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Andrew Vasquez444786d2009-01-05 11:18:10 -08001471 struct scsi_qla_host *vha = sp->fcport->vha;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001472 struct qla_hw_data *ha = vha->hw;
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001473 char tag[2];
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001474
1475 /* Setup device pointers. */
1476 ret = 0;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001477
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001478 qla25xx_set_que(sp, &rsp);
1479 req = vha->req;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001480
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001481 /* So we know we haven't pci_map'ed anything yet */
1482 tot_dsds = 0;
1483
1484 /* Send marker if required */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001485 if (vha->marker_needed != 0) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001486 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1487 QLA_SUCCESS)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001488 return QLA_FUNCTION_FAILED;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001489 vha->marker_needed = 0;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001490 }
1491
1492 /* Acquire ring specific lock */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001493 spin_lock_irqsave(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001494
1495 /* Check for room in outstanding command list. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001496 handle = req->current_outstanding_cmd;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001497 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1498 handle++;
1499 if (handle == MAX_OUTSTANDING_COMMANDS)
1500 handle = 1;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001501 if (!req->outstanding_cmds[handle])
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001502 break;
1503 }
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001504 if (index == MAX_OUTSTANDING_COMMANDS) {
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001505 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001506 }
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001507
1508 /* Map the sg table so we have an accurate count of sg entries needed */
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001509 if (scsi_sg_count(cmd)) {
1510 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1511 scsi_sg_count(cmd), cmd->sc_data_direction);
1512 if (unlikely(!nseg))
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001513 goto queuing_error;
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001514 } else
1515 nseg = 0;
1516
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001517 tot_dsds = nseg;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001518 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001519 if (req->cnt < (req_cnt + 2)) {
Andrew Vasquez08029992009-03-24 09:07:55 -07001520 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001521
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001522 if (req->ring_index < cnt)
1523 req->cnt = cnt - req->ring_index;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001524 else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001525 req->cnt = req->length -
1526 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04001527 if (req->cnt < (req_cnt + 2))
1528 goto queuing_error;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001529 }
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001530
1531 /* Build command packet. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001532 req->current_outstanding_cmd = handle;
1533 req->outstanding_cmds[handle] = sp;
Andrew Vasquezcf53b062009-08-20 11:06:04 -07001534 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001535 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001536 req->cnt -= req_cnt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001537
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001538 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001539 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001540
1541 /* Zero out remaining portion of packet. */
James Bottomley72df8322005-10-28 14:41:19 -05001542 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001543 clr_ptr = (uint32_t *)cmd_pkt + 2;
1544 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1545 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1546
1547 /* Set NPORT-ID and LUN number*/
1548 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1549 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1550 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1551 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04001552 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001553
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001554 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
andrew.vasquez@qlogic.com0d4be122006-02-07 08:45:35 -08001555 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001556
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001557 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1558 if (scsi_populate_tag_msg(cmd, tag)) {
1559 switch (tag[0]) {
1560 case HEAD_OF_QUEUE_TAG:
1561 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1562 break;
1563 case ORDERED_QUEUE_TAG:
1564 cmd_pkt->task = TSK_ORDERED;
1565 break;
1566 }
1567 }
1568
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001569 /* Load SCSI command packet. */
1570 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1571 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1572
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001573 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001574
1575 /* Build IOCB segments */
1576 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1577
1578 /* Set total data segment count. */
1579 cmd_pkt->entry_count = (uint8_t)req_cnt;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001580 /* Specify response queue number where completion should happen */
1581 cmd_pkt->entry_status = (uint8_t) rsp->id;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001582 wmb();
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001583 /* Adjust ring index. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001584 req->ring_index++;
1585 if (req->ring_index == req->length) {
1586 req->ring_index = 0;
1587 req->ring_ptr = req->ring;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001588 } else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001589 req->ring_ptr++;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001590
1591 sp->flags |= SRB_DMA_VALID;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001592
1593 /* Set chip new ring index. */
Andrew Vasquez08029992009-03-24 09:07:55 -07001594 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1595 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001596
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001597 /* Manage unprocessed RIO/ZIO commands in response queue. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001598 if (vha->flags.process_response_queue &&
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001599 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001600 qla24xx_process_response_queue(vha, rsp);
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001601
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001602 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001603 return QLA_SUCCESS;
1604
1605queuing_error:
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001606 if (tot_dsds)
1607 scsi_dma_unmap(cmd);
1608
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001609 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001610
1611 return QLA_FUNCTION_FAILED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612}
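
/*
 * Minimal sketch (not part of the driver; name hypothetical): the ring
 * free-space computation used by the start_scsi variants in this file,
 * factored out.  'in' is the driver's ring_index, 'out' is the consumer
 * index read back from the chip, 'length' is the ring size in entries.
 */
static inline uint16_t qla_ring_room(uint16_t in, uint16_t out, uint16_t length)
{
	return (in < out) ? out - in : length - (in - out);
}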
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001613
Arun Easibad75002010-05-04 15:01:30 -07001614
1615/**
1616 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1617 * @sp: command to send to the ISP
1618 *
1619 * Returns non-zero if a failure occurred, else zero.
1620 */
1621int
1622qla24xx_dif_start_scsi(srb_t *sp)
1623{
1624 int nseg;
1625 unsigned long flags;
1626 uint32_t *clr_ptr;
1627 uint32_t index;
1628 uint32_t handle;
1629 uint16_t cnt;
1630 uint16_t req_cnt = 0;
1631 uint16_t tot_dsds;
1632 uint16_t tot_prot_dsds;
1633 uint16_t fw_prot_opts = 0;
1634 struct req_que *req = NULL;
1635 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001636 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Arun Easibad75002010-05-04 15:01:30 -07001637 struct scsi_qla_host *vha = sp->fcport->vha;
1638 struct qla_hw_data *ha = vha->hw;
1639 struct cmd_type_crc_2 *cmd_pkt;
1640 uint32_t status = 0;
1641
1642#define QDSS_GOT_Q_SPACE BIT_0
1643
Arun Easi0c470872010-07-23 15:28:38 +05001644	/* Only process protection I/O or CDBs longer than 16 bytes in this routine */
1645 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1646 if (cmd->cmd_len <= 16)
1647 return qla24xx_start_scsi(sp);
1648 }
Arun Easibad75002010-05-04 15:01:30 -07001649
1650 /* Setup device pointers. */
1651
1652 qla25xx_set_que(sp, &rsp);
1653 req = vha->req;
1654
1655 /* So we know we haven't pci_map'ed anything yet */
1656 tot_dsds = 0;
1657
1658 /* Send marker if required */
1659 if (vha->marker_needed != 0) {
1660 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1661 QLA_SUCCESS)
1662 return QLA_FUNCTION_FAILED;
1663 vha->marker_needed = 0;
1664 }
1665
1666 /* Acquire ring specific lock */
1667 spin_lock_irqsave(&ha->hardware_lock, flags);
1668
1669 /* Check for room in outstanding command list. */
1670 handle = req->current_outstanding_cmd;
1671 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1672 handle++;
1673 if (handle == MAX_OUTSTANDING_COMMANDS)
1674 handle = 1;
1675 if (!req->outstanding_cmds[handle])
1676 break;
1677 }
1678
1679 if (index == MAX_OUTSTANDING_COMMANDS)
1680 goto queuing_error;
1681
1682 /* Compute number of required data segments */
1683 /* Map the sg table so we have an accurate count of sg entries needed */
1684 if (scsi_sg_count(cmd)) {
1685 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1686 scsi_sg_count(cmd), cmd->sc_data_direction);
1687 if (unlikely(!nseg))
1688 goto queuing_error;
1689 else
1690 sp->flags |= SRB_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001691
1692 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1693 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1694 struct qla2_sgx sgx;
1695 uint32_t partial;
1696
1697 memset(&sgx, 0, sizeof(struct qla2_sgx));
1698 sgx.tot_bytes = scsi_bufflen(cmd);
1699 sgx.cur_sg = scsi_sglist(cmd);
1700 sgx.sp = sp;
1701
1702 nseg = 0;
1703 while (qla24xx_get_one_block_sg(
1704 cmd->device->sector_size, &sgx, &partial))
1705 nseg++;
1706 }
Arun Easibad75002010-05-04 15:01:30 -07001707 } else
1708 nseg = 0;
1709
1710 /* number of required data segments */
1711 tot_dsds = nseg;
1712
1713 /* Compute number of required protection segments */
1714 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1715 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1716 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1717 if (unlikely(!nseg))
1718 goto queuing_error;
1719 else
1720 sp->flags |= SRB_CRC_PROT_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001721
1722 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1723 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1724 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1725 }
Arun Easibad75002010-05-04 15:01:30 -07001726 } else {
1727 nseg = 0;
1728 }
1729
1730 req_cnt = 1;
1731 /* Total Data and protection sg segment(s) */
1732 tot_prot_dsds = nseg;
1733 tot_dsds += nseg;
1734 if (req->cnt < (req_cnt + 2)) {
1735 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1736
1737 if (req->ring_index < cnt)
1738 req->cnt = cnt - req->ring_index;
1739 else
1740 req->cnt = req->length -
1741 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04001742 if (req->cnt < (req_cnt + 2))
1743 goto queuing_error;
Arun Easibad75002010-05-04 15:01:30 -07001744 }
1745
Arun Easibad75002010-05-04 15:01:30 -07001746 status |= QDSS_GOT_Q_SPACE;
1747
1748 /* Build header part of command packet (excluding the OPCODE). */
1749 req->current_outstanding_cmd = handle;
1750 req->outstanding_cmds[handle] = sp;
Arun Easi8cb20492011-08-16 11:29:22 -07001751 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001752 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Arun Easibad75002010-05-04 15:01:30 -07001753 req->cnt -= req_cnt;
1754
1755 /* Fill-in common area */
1756 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1757 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1758
1759 clr_ptr = (uint32_t *)cmd_pkt + 2;
1760 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1761
1762 /* Set NPORT-ID and LUN number*/
1763 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1764 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1765 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1766 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1767
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001768 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Arun Easibad75002010-05-04 15:01:30 -07001769 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1770
1771 /* Total Data and protection segment(s) */
1772 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1773
1774 /* Build IOCB segments and adjust for data protection segments */
1775 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1776 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1777 QLA_SUCCESS)
1778 goto queuing_error;
1779
1780 cmd_pkt->entry_count = (uint8_t)req_cnt;
1781 /* Specify response queue number where completion should happen */
1782 cmd_pkt->entry_status = (uint8_t) rsp->id;
1783 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1784 wmb();
1785
1786 /* Adjust ring index. */
1787 req->ring_index++;
1788 if (req->ring_index == req->length) {
1789 req->ring_index = 0;
1790 req->ring_ptr = req->ring;
1791 } else
1792 req->ring_ptr++;
1793
1794 /* Set chip new ring index. */
1795 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1796 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1797
1798 /* Manage unprocessed RIO/ZIO commands in response queue. */
1799 if (vha->flags.process_response_queue &&
1800 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1801 qla24xx_process_response_queue(vha, rsp);
1802
1803 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1804
1805 return QLA_SUCCESS;
1806
1807queuing_error:
1808 if (status & QDSS_GOT_Q_SPACE) {
1809 req->outstanding_cmds[handle] = NULL;
1810 req->cnt += req_cnt;
1811 }
1812 /* Cleanup will be performed by the caller (queuecommand) */
1813
1814 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Arun Easibad75002010-05-04 15:01:30 -07001815 return QLA_FUNCTION_FAILED;
1816}
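
/*
 * Minimal sketch (not part of the driver; name hypothetical): the
 * free-handle scan duplicated by the routines above.  Handle 0 is
 * reserved, so the search wraps from MAX_OUTSTANDING_COMMANDS - 1 back
 * to 1; 0 is returned when the outstanding-command array is full.
 */
static inline uint32_t qla_find_free_handle(struct req_que *req)
{
	uint32_t handle = req->current_outstanding_cmd;
	uint32_t index;

	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}
	return 0;	/* array full */
}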
1817
1818
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001819static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001820{
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001821 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001822 struct qla_hw_data *ha = sp->fcport->vha->hw;
1823 int affinity = cmd->request->cpu;
1824
Anirban Chakraborty7163ea82009-08-05 09:18:40 -07001825 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001826 affinity < ha->max_rsp_queues - 1)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001827 *rsp = ha->rsp_q_map[affinity + 1];
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001828 else
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001829 *rsp = ha->rsp_q_map[0];
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001830}
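
/*
 * Illustrative mapping: with cpu_affinity_enabled set and
 * max_rsp_queues == 4, commands issued from CPUs 0..2 complete on
 * rsp_q_map[1..3] respectively; any other CPU (or a negative affinity)
 * falls back to rsp_q_map[0].
 */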
Andrew Vasquezac280b62009-08-20 11:06:05 -07001831
1832/* Generic Control-SRB manipulation functions. */
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001833void *
1834qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001835{
Andrew Vasquezac280b62009-08-20 11:06:05 -07001836 struct qla_hw_data *ha = vha->hw;
1837 struct req_que *req = ha->req_q_map[0];
1838 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1839 uint32_t index, handle;
1840 request_t *pkt;
1841 uint16_t cnt, req_cnt;
1842
1843 pkt = NULL;
1844 req_cnt = 1;
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001845 handle = 0;
1846
1847 if (!sp)
1848 goto skip_cmd_array;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001849
1850 /* Check for room in outstanding command list. */
1851 handle = req->current_outstanding_cmd;
1852 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1853 handle++;
1854 if (handle == MAX_OUTSTANDING_COMMANDS)
1855 handle = 1;
1856 if (!req->outstanding_cmds[handle])
1857 break;
1858 }
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001859 if (index == MAX_OUTSTANDING_COMMANDS) {
1860 ql_log(ql_log_warn, vha, 0x700b,
1861 "No room on oustanding cmd array.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07001862 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001863 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07001864
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001865 /* Prep command array. */
1866 req->current_outstanding_cmd = handle;
1867 req->outstanding_cmds[handle] = sp;
1868 sp->handle = handle;
1869
Andrew Vasquez57807902011-11-18 09:03:20 -08001870 /* Adjust entry-counts as needed. */
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001871 if (sp->type != SRB_SCSI_CMD)
1872 req_cnt = sp->iocbs;
Andrew Vasquez57807902011-11-18 09:03:20 -08001873
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001874skip_cmd_array:
Andrew Vasquezac280b62009-08-20 11:06:05 -07001875 /* Check for room on request queue. */
1876 if (req->cnt < req_cnt) {
Giridhar Malavali6246b8a2012-02-09 11:15:34 -08001877 if (ha->mqenable || IS_QLA83XX(ha))
Andrew Vasquezac280b62009-08-20 11:06:05 -07001878 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001879 else if (IS_QLA82XX(ha))
1880 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
Andrew Vasquezac280b62009-08-20 11:06:05 -07001881 else if (IS_FWI2_CAPABLE(ha))
1882 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1883 else
1884 cnt = qla2x00_debounce_register(
1885 ISP_REQ_Q_OUT(ha, &reg->isp));
1886
1887 if (req->ring_index < cnt)
1888 req->cnt = cnt - req->ring_index;
1889 else
1890 req->cnt = req->length -
1891 (req->ring_index - cnt);
1892 }
1893 if (req->cnt < req_cnt)
1894 goto queuing_error;
1895
1896 /* Prep packet */
Andrew Vasquezac280b62009-08-20 11:06:05 -07001897 req->cnt -= req_cnt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001898 pkt = req->ring_ptr;
1899 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1900 pkt->entry_count = req_cnt;
1901 pkt->handle = handle;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001902
1903queuing_error:
1904 return pkt;
1905}
1906
1907static void
Andrew Vasquezac280b62009-08-20 11:06:05 -07001908qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1909{
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001910 struct srb_iocb *lio = &sp->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001911
1912 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1913 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001914 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001915 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001916 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001917 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1918 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1919 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1920 logio->port_id[1] = sp->fcport->d_id.b.area;
1921 logio->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04001922 logio->vp_index = sp->fcport->vha->vp_idx;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001923}
1924
1925static void
1926qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1927{
1928 struct qla_hw_data *ha = sp->fcport->vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001929 struct srb_iocb *lio = &sp->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001930 uint16_t opts;
1931
Giridhar Malavalib9637522010-05-28 15:08:15 -07001932 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001933 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1934 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001935 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1936 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001937 if (HAS_EXTENDED_IDS(ha)) {
1938 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1939 mbx->mb10 = cpu_to_le16(opts);
1940 } else {
1941 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1942 }
1943 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1944 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1945 sp->fcport->d_id.b.al_pa);
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04001946 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
Andrew Vasquezac280b62009-08-20 11:06:05 -07001947}
1948
1949static void
1950qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1951{
1952 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1953 logio->control_flags =
1954 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1955 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1956 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1957 logio->port_id[1] = sp->fcport->d_id.b.area;
1958 logio->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04001959 logio->vp_index = sp->fcport->vha->vp_idx;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001960}
1961
1962static void
1963qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1964{
1965 struct qla_hw_data *ha = sp->fcport->vha->hw;
1966
Giridhar Malavalib9637522010-05-28 15:08:15 -07001967 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001968 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1969 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1970 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1971 cpu_to_le16(sp->fcport->loop_id):
1972 cpu_to_le16(sp->fcport->loop_id << 8);
1973 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1974 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1975 sp->fcport->d_id.b.al_pa);
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04001976 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
Andrew Vasquezac280b62009-08-20 11:06:05 -07001977 /* Implicit: mbx->mbx10 = 0. */
1978}
1979
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001980static void
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07001981qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1982{
1983 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1984 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1985 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04001986 logio->vp_index = sp->fcport->vha->vp_idx;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07001987}
1988
1989static void
1990qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1991{
1992 struct qla_hw_data *ha = sp->fcport->vha->hw;
1993
1994 mbx->entry_type = MBX_IOCB_TYPE;
1995 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1996 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1997 if (HAS_EXTENDED_IDS(ha)) {
1998 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1999 mbx->mb10 = cpu_to_le16(BIT_0);
2000 } else {
2001 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2002 }
2003 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2004 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2005 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2006 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04002007 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002008}
2009
2010static void
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002011qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2012{
2013 uint32_t flags;
2014 unsigned int lun;
2015 struct fc_port *fcport = sp->fcport;
2016 scsi_qla_host_t *vha = fcport->vha;
2017 struct qla_hw_data *ha = vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002018 struct srb_iocb *iocb = &sp->u.iocb_cmd;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002019 struct req_que *req = vha->req;
2020
2021 flags = iocb->u.tmf.flags;
2022 lun = iocb->u.tmf.lun;
2023
2024 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2025 tsk->entry_count = 1;
2026 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2027 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2028 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2029 tsk->control_flags = cpu_to_le32(flags);
2030 tsk->port_id[0] = fcport->d_id.b.al_pa;
2031 tsk->port_id[1] = fcport->d_id.b.area;
2032 tsk->port_id[2] = fcport->d_id.b.domain;
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04002033 tsk->vp_index = fcport->vha->vp_idx;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002034
2035 if (flags == TCF_LUN_RESET) {
2036 int_to_scsilun(lun, &tsk->lun);
2037 host_to_fcp_swap((uint8_t *)&tsk->lun,
2038 sizeof(tsk->lun));
2039 }
2040}
2041
2042static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002043qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2044{
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002045 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002046
2047 els_iocb->entry_type = ELS_IOCB_TYPE;
2048 els_iocb->entry_count = 1;
2049 els_iocb->sys_define = 0;
2050 els_iocb->entry_status = 0;
2051 els_iocb->handle = sp->handle;
2052 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2053 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04002054 els_iocb->vp_index = sp->fcport->vha->vp_idx;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002055 els_iocb->sof_type = EST_SOFI3;
2056 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2057
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002058 els_iocb->opcode =
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002059 sp->type == SRB_ELS_CMD_RPT ?
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002060 bsg_job->request->rqst_data.r_els.els_code :
2061 bsg_job->request->rqst_data.h_els.command_code;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002062 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2063 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2064 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2065 els_iocb->control_flags = 0;
2066 els_iocb->rx_byte_count =
2067 cpu_to_le32(bsg_job->reply_payload.payload_len);
2068 els_iocb->tx_byte_count =
2069 cpu_to_le32(bsg_job->request_payload.payload_len);
2070
2071 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2072 (bsg_job->request_payload.sg_list)));
2073 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2074 (bsg_job->request_payload.sg_list)));
2075 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2076 (bsg_job->request_payload.sg_list));
2077
2078 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2079 (bsg_job->reply_payload.sg_list)));
2080 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2081 (bsg_job->reply_payload.sg_list)));
2082 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2083 (bsg_job->reply_payload.sg_list));
2084}
2085
2086static void
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002087qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2088{
2089 uint16_t avail_dsds;
2090 uint32_t *cur_dsd;
2091 struct scatterlist *sg;
2092 int index;
2093 uint16_t tot_dsds;
2094 scsi_qla_host_t *vha = sp->fcport->vha;
2095 struct qla_hw_data *ha = vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002096 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002097	int loop_iteration = 0;
2098 int cont_iocb_prsnt = 0;
2099 int entry_count = 1;
2100
2101 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2102 ct_iocb->entry_type = CT_IOCB_TYPE;
2103 ct_iocb->entry_status = 0;
2104 ct_iocb->handle1 = sp->handle;
2105 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2106 ct_iocb->status = __constant_cpu_to_le16(0);
2107 ct_iocb->control_flags = __constant_cpu_to_le16(0);
2108 ct_iocb->timeout = 0;
2109 ct_iocb->cmd_dsd_count =
2110 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2111 ct_iocb->total_dsd_count =
2112 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2113 ct_iocb->req_bytecount =
2114 cpu_to_le32(bsg_job->request_payload.payload_len);
2115 ct_iocb->rsp_bytecount =
2116 cpu_to_le32(bsg_job->reply_payload.payload_len);
2117
2118 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2119 (bsg_job->request_payload.sg_list)));
2120 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2121 (bsg_job->request_payload.sg_list)));
2122 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2123
2124 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2125 (bsg_job->reply_payload.sg_list)));
2126 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2127 (bsg_job->reply_payload.sg_list)));
2128 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2129
2130 avail_dsds = 1;
2131 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2132 index = 0;
2133 tot_dsds = bsg_job->reply_payload.sg_cnt;
2134
2135 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2136 dma_addr_t sle_dma;
2137 cont_a64_entry_t *cont_pkt;
2138
2139 /* Allocate additional continuation packets? */
2140 if (avail_dsds == 0) {
2141 /*
2142 * Five DSDs are available in the Cont.
2143 * Type 1 IOCB.
2144 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002145 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2146 vha->hw->req_q_map[0]);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002147 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2148 avail_dsds = 5;
2149 cont_iocb_prsnt = 1;
2150 entry_count++;
2151 }
2152
2153 sle_dma = sg_dma_address(sg);
2154 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2155 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2156 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2157		loop_iteration++;
2158 avail_dsds--;
2159 }
2160 ct_iocb->entry_count = entry_count;
2161}
2162
2163static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002164qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2165{
2166 uint16_t avail_dsds;
2167 uint32_t *cur_dsd;
2168 struct scatterlist *sg;
2169 int index;
2170 uint16_t tot_dsds;
2171 scsi_qla_host_t *vha = sp->fcport->vha;
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002172 struct qla_hw_data *ha = vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002173 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002174	int loop_iteration = 0;
2175 int cont_iocb_prsnt = 0;
2176 int entry_count = 1;
2177
2178 ct_iocb->entry_type = CT_IOCB_TYPE;
2179 ct_iocb->entry_status = 0;
2180 ct_iocb->sys_define = 0;
2181 ct_iocb->handle = sp->handle;
2182
2183 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04002184 ct_iocb->vp_index = sp->fcport->vha->vp_idx;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002185 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2186
2187 ct_iocb->cmd_dsd_count =
2188 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2189 ct_iocb->timeout = 0;
2190 ct_iocb->rsp_dsd_count =
2191 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2192 ct_iocb->rsp_byte_count =
2193 cpu_to_le32(bsg_job->reply_payload.payload_len);
2194 ct_iocb->cmd_byte_count =
2195 cpu_to_le32(bsg_job->request_payload.payload_len);
2196 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2197 (bsg_job->request_payload.sg_list)));
2198 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2199 (bsg_job->request_payload.sg_list)));
2200 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2201 (bsg_job->request_payload.sg_list));
2202
2203 avail_dsds = 1;
2204 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2205 index = 0;
2206 tot_dsds = bsg_job->reply_payload.sg_cnt;
2207
2208 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2209 dma_addr_t sle_dma;
2210 cont_a64_entry_t *cont_pkt;
2211
2212 /* Allocate additional continuation packets? */
2213 if (avail_dsds == 0) {
2214 /*
2215 * Five DSDs are available in the Cont.
2216 * Type 1 IOCB.
2217 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002218 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2219 ha->req_q_map[0]);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002220 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2221 avail_dsds = 5;
2222 cont_iocb_prsnt = 1;
2223 entry_count++;
2224 }
2225
2226 sle_dma = sg_dma_address(sg);
2227 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2228 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2229 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2230		loop_iteration++;
2231 avail_dsds--;
2232 }
2233 ct_iocb->entry_count = entry_count;
2234}
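
/*
 * Worked example (illustrative): the base CT IOCB holds one in-line
 * reply DSD and each Continuation Type 1 IOCB holds five, so a reply
 * scatterlist of 12 entries costs 1 + DIV_ROUND_UP(11, 5) = 4 ring
 * entries, which is the entry_count both CT builders above compute.
 */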
2235
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002236/**
2237 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2238 * @sp: command to send to the ISP
2239 *
2240 * Returns non-zero if a failure occurred, else zero.
2241 */
2242int
2243qla82xx_start_scsi(srb_t *sp)
2244{
2245 int ret, nseg;
2246 unsigned long flags;
2247 struct scsi_cmnd *cmd;
2248 uint32_t *clr_ptr;
2249 uint32_t index;
2250 uint32_t handle;
2251 uint16_t cnt;
2252 uint16_t req_cnt;
2253 uint16_t tot_dsds;
2254 struct device_reg_82xx __iomem *reg;
2255 uint32_t dbval;
2256 uint32_t *fcp_dl;
2257 uint8_t additional_cdb_len;
2258 struct ct6_dsd *ctx;
2259 struct scsi_qla_host *vha = sp->fcport->vha;
2260 struct qla_hw_data *ha = vha->hw;
2261 struct req_que *req = NULL;
2262 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002263 char tag[2];
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002264
2265 /* Setup device pointers. */
2266 ret = 0;
2267 reg = &ha->iobase->isp82;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002268 cmd = GET_CMD_SP(sp);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002269 req = vha->req;
2270 rsp = ha->rsp_q_map[0];
2271
2272 /* So we know we haven't pci_map'ed anything yet */
2273 tot_dsds = 0;
2274
2275 dbval = 0x04 | (ha->portnum << 5);
2276
2277 /* Send marker if required */
2278 if (vha->marker_needed != 0) {
2279 if (qla2x00_marker(vha, req,
2280 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2281 ql_log(ql_log_warn, vha, 0x300c,
2282 "qla2x00_marker failed for cmd=%p.\n", cmd);
2283 return QLA_FUNCTION_FAILED;
2284 }
2285 vha->marker_needed = 0;
2286 }
2287
2288 /* Acquire ring specific lock */
2289 spin_lock_irqsave(&ha->hardware_lock, flags);
2290
2291 /* Check for room in outstanding command list. */
2292 handle = req->current_outstanding_cmd;
2293 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2294 handle++;
2295 if (handle == MAX_OUTSTANDING_COMMANDS)
2296 handle = 1;
2297 if (!req->outstanding_cmds[handle])
2298 break;
2299 }
2300 if (index == MAX_OUTSTANDING_COMMANDS)
2301 goto queuing_error;
2302
2303 /* Map the sg table so we have an accurate count of sg entries needed */
2304 if (scsi_sg_count(cmd)) {
2305 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2306 scsi_sg_count(cmd), cmd->sc_data_direction);
2307 if (unlikely(!nseg))
2308 goto queuing_error;
2309 } else
2310 nseg = 0;
2311
2312 tot_dsds = nseg;
2313
2314 if (tot_dsds > ql2xshiftctondsd) {
2315 struct cmd_type_6 *cmd_pkt;
2316 uint16_t more_dsd_lists = 0;
2317 struct dsd_dma *dsd_ptr;
2318 uint16_t i;
2319
2320 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2321 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2322 ql_dbg(ql_dbg_io, vha, 0x300d,
2323 "Num of DSD list %d is than %d for cmd=%p.\n",
2324 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2325 cmd);
2326 goto queuing_error;
2327 }
2328
2329 if (more_dsd_lists <= ha->gbl_dsd_avail)
2330 goto sufficient_dsds;
2331 else
2332 more_dsd_lists -= ha->gbl_dsd_avail;
2333
2334 for (i = 0; i < more_dsd_lists; i++) {
2335 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2336 if (!dsd_ptr) {
2337 ql_log(ql_log_fatal, vha, 0x300e,
2338 "Failed to allocate memory for dsd_dma "
2339 "for cmd=%p.\n", cmd);
2340 goto queuing_error;
2341 }
2342
2343 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2344 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2345 if (!dsd_ptr->dsd_addr) {
2346 kfree(dsd_ptr);
2347 ql_log(ql_log_fatal, vha, 0x300f,
2348 "Failed to allocate memory for dsd_addr "
2349 "for cmd=%p.\n", cmd);
2350 goto queuing_error;
2351 }
2352 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2353 ha->gbl_dsd_avail++;
2354 }
2355
2356sufficient_dsds:
2357 req_cnt = 1;
2358
2359 if (req->cnt < (req_cnt + 2)) {
2360 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2361 &reg->req_q_out[0]);
2362 if (req->ring_index < cnt)
2363 req->cnt = cnt - req->ring_index;
2364 else
2365 req->cnt = req->length -
2366 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04002367 if (req->cnt < (req_cnt + 2))
2368 goto queuing_error;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002369 }
2370
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002371 ctx = sp->u.scmd.ctx =
2372 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2373 if (!ctx) {
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002374 ql_log(ql_log_fatal, vha, 0x3010,
2375 "Failed to allocate ctx for cmd=%p.\n", cmd);
2376 goto queuing_error;
2377 }
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002378
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002379 memset(ctx, 0, sizeof(struct ct6_dsd));
2380 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2381 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2382 if (!ctx->fcp_cmnd) {
2383 ql_log(ql_log_fatal, vha, 0x3011,
2384 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2385 goto queuing_error_fcp_cmnd;
2386 }
2387
2388 /* Initialize the DSD list and dma handle */
2389 INIT_LIST_HEAD(&ctx->dsd_list);
2390 ctx->dsd_use_cnt = 0;
2391
2392 if (cmd->cmd_len > 16) {
2393 additional_cdb_len = cmd->cmd_len - 16;
2394 if ((cmd->cmd_len % 4) != 0) {
2395				/* SCSI commands longer than 16 bytes must be
2396				 * a multiple of 4 bytes
2397				 */
2398 ql_log(ql_log_warn, vha, 0x3012,
2399 "scsi cmd len %d not multiple of 4 "
2400 "for cmd=%p.\n", cmd->cmd_len, cmd);
2401 goto queuing_error_fcp_cmnd;
2402 }
2403 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2404 } else {
2405 additional_cdb_len = 0;
2406 ctx->fcp_cmnd_len = 12 + 16 + 4;
2407 }
2408
2409 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2410 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2411
2412 /* Zero out remaining portion of packet. */
2413 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2414 clr_ptr = (uint32_t *)cmd_pkt + 2;
2415 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2416 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2417
2418 /* Set NPORT-ID and LUN number*/
2419 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2420 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2421 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2422 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04002423 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002424
2425 /* Build IOCB segments */
2426 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2427 goto queuing_error_fcp_cmnd;
2428
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002429 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002430 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2431
2432 /* build FCP_CMND IU */
2433 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002434 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002435 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2436
2437 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2438 ctx->fcp_cmnd->additional_cdb_len |= 1;
2439 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2440 ctx->fcp_cmnd->additional_cdb_len |= 2;
2441
2442 /*
2443 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2444 */
2445 if (scsi_populate_tag_msg(cmd, tag)) {
2446 switch (tag[0]) {
2447 case HEAD_OF_QUEUE_TAG:
2448 ctx->fcp_cmnd->task_attribute =
2449 TSK_HEAD_OF_QUEUE;
2450 break;
2451 case ORDERED_QUEUE_TAG:
2452 ctx->fcp_cmnd->task_attribute =
2453 TSK_ORDERED;
2454 break;
2455 }
2456 }
2457
Saurav Kashyapa00f6292011-11-18 09:03:19 -08002458 /* Populate the FCP_PRIO. */
2459 if (ha->flags.fcp_prio_enabled)
2460 ctx->fcp_cmnd->task_attribute |=
2461 sp->fcport->fcp_prio << 3;
2462
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002463 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2464
2465 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2466 additional_cdb_len);
2467 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2468
2469 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2470 cmd_pkt->fcp_cmnd_dseg_address[0] =
2471 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2472 cmd_pkt->fcp_cmnd_dseg_address[1] =
2473 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2474
2475 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2476 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2477 /* Set total data segment count. */
2478 cmd_pkt->entry_count = (uint8_t)req_cnt;
2479 /* Specify response queue number where
2480 * completion should happen
2481 */
2482 cmd_pkt->entry_status = (uint8_t) rsp->id;
2483 } else {
2484 struct cmd_type_7 *cmd_pkt;
2485 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2486 if (req->cnt < (req_cnt + 2)) {
2487 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2488 &reg->req_q_out[0]);
2489 if (req->ring_index < cnt)
2490 req->cnt = cnt - req->ring_index;
2491 else
2492 req->cnt = req->length -
2493 (req->ring_index - cnt);
2494 }
2495 if (req->cnt < (req_cnt + 2))
2496 goto queuing_error;
2497
2498 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2499 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2500
2501 /* Zero out remaining portion of packet. */
2502 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2503 clr_ptr = (uint32_t *)cmd_pkt + 2;
2504 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2505 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2506
2507 /* Set NPORT-ID and LUN number*/
2508 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2509 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2510 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2511 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04002512 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002513
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002514 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002515 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002516 sizeof(cmd_pkt->lun));
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002517
2518 /*
2519 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2520 */
2521 if (scsi_populate_tag_msg(cmd, tag)) {
2522 switch (tag[0]) {
2523 case HEAD_OF_QUEUE_TAG:
2524 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2525 break;
2526 case ORDERED_QUEUE_TAG:
2527 cmd_pkt->task = TSK_ORDERED;
2528 break;
2529 }
2530 }
2531
Saurav Kashyapa00f6292011-11-18 09:03:19 -08002532 /* Populate the FCP_PRIO. */
2533 if (ha->flags.fcp_prio_enabled)
2534 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2535
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002536 /* Load SCSI command packet. */
2537 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2538 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2539
2540 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2541
2542 /* Build IOCB segments */
2543 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2544
2545 /* Set total data segment count. */
2546 cmd_pkt->entry_count = (uint8_t)req_cnt;
2547 /* Specify response queue number where
2548 * completion should happen.
2549 */
2550 cmd_pkt->entry_status = (uint8_t) rsp->id;
2551
2552 }
2553 /* Build command packet. */
2554 req->current_outstanding_cmd = handle;
2555 req->outstanding_cmds[handle] = sp;
2556 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002557 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002558 req->cnt -= req_cnt;
2559 wmb();
2560
2561 /* Adjust ring index. */
2562 req->ring_index++;
2563 if (req->ring_index == req->length) {
2564 req->ring_index = 0;
2565 req->ring_ptr = req->ring;
2566 } else
2567 req->ring_ptr++;
2568
2569 sp->flags |= SRB_DMA_VALID;
2570
2571 /* Set chip new ring index. */
2572 /* write, read and verify logic */
2573 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2574 if (ql2xdbwr)
2575 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2576 else {
2577 WRT_REG_DWORD(
2578 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2579 dbval);
2580 wmb();
2581 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2582 WRT_REG_DWORD(
2583 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2584 dbval);
2585 wmb();
2586 }
2587 }
2588
2589 /* Manage unprocessed RIO/ZIO commands in response queue. */
2590 if (vha->flags.process_response_queue &&
2591 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2592 qla24xx_process_response_queue(vha, rsp);
2593
2594 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2595 return QLA_SUCCESS;
2596
2597queuing_error_fcp_cmnd:
2598 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2599queuing_error:
2600 if (tot_dsds)
2601 scsi_dma_unmap(cmd);
2602
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002603 if (sp->u.scmd.ctx) {
2604 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2605 sp->u.scmd.ctx = NULL;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002606 }
2607 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2608
2609 return QLA_FUNCTION_FAILED;
2610}
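
/*
 * Minimal sketch (not part of the driver; name hypothetical): the
 * write-and-verify doorbell update qla82xx_start_scsi() falls back to
 * when ql2xdbwr is clear -- the value is rewritten until the chip's
 * read-back pointer reflects it.
 */
static inline void qla82xx_dbell_wr_verify(struct qla_hw_data *ha, uint32_t dbval)
{
	WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
	wmb();
	while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
		WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
		wmb();
	}
}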
2611
Andrew Vasquezac280b62009-08-20 11:06:05 -07002612int
2613qla2x00_start_sp(srb_t *sp)
2614{
2615 int rval;
2616 struct qla_hw_data *ha = sp->fcport->vha->hw;
2617 void *pkt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002618 unsigned long flags;
2619
2620 rval = QLA_FUNCTION_FAILED;
2621 spin_lock_irqsave(&ha->hardware_lock, flags);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002622 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002623 if (!pkt) {
2624 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2625 "qla2x00_alloc_iocbs failed.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07002626 goto done;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002627 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07002628
2629 rval = QLA_SUCCESS;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002630 switch (sp->type) {
Andrew Vasquezac280b62009-08-20 11:06:05 -07002631 case SRB_LOGIN_CMD:
2632 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002633 qla24xx_login_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002634 qla2x00_login_iocb(sp, pkt);
2635 break;
2636 case SRB_LOGOUT_CMD:
2637 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002638 qla24xx_logout_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002639 qla2x00_logout_iocb(sp, pkt);
2640 break;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002641 case SRB_ELS_CMD_RPT:
2642 case SRB_ELS_CMD_HST:
2643 qla24xx_els_iocb(sp, pkt);
2644 break;
2645 case SRB_CT_CMD:
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002646 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez57807902011-11-18 09:03:20 -08002647 qla24xx_ct_iocb(sp, pkt) :
2648 qla2x00_ct_iocb(sp, pkt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002649 break;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002650 case SRB_ADISC_CMD:
2651 IS_FWI2_CAPABLE(ha) ?
2652 qla24xx_adisc_iocb(sp, pkt) :
2653 qla2x00_adisc_iocb(sp, pkt);
2654 break;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002655 case SRB_TM_CMD:
2656 qla24xx_tm_iocb(sp, pkt);
2657 break;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002658 default:
2659 break;
2660 }
2661
2662 wmb();
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002663 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002664done:
2665 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2666 return rval;
2667}
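
/*
 * Illustrative usage (hedged sketch, not from this file): a caller
 * prepares an srb_t with sp->type set to one of the SRB_*_CMD values
 * dispatched above -- sp->u.iocb_cmd for login/logout/ADISC/TM,
 * sp->u.bsg_job for ELS/CT passthrough -- and then issues it:
 *
 *	sp->type = SRB_LOGIN_CMD;
 *	...
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		... undo SRB setup (caller-specific) ...
 */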