/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
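
/*
 * Example: for dsds = 17, the Command Type 2 IOCB itself holds the
 * first 3 DSDs and the remaining 14 need ceil(14 / 7) = 2 Continuation
 * Type 0 IOCBs, so qla2x00_calc_iocbs_32(17) returns 1 + 2 = 3 request
 * ring entries.
 */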

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
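
/*
 * Example: for dsds = 12, the Command Type 3 IOCB itself holds the
 * first 2 DSDs and the remaining 10 need ceil(10 / 5) = 2 Continuation
 * Type 1 IOCBs, so qla2x00_calc_iocbs_64(12) returns 1 + 2 = 3 request
 * ring entries.
 */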

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;

	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
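
/*
 * Note: both qla2x00_prep_cont_type*_iocb() helpers advance the request
 * ring unconditionally; callers are expected to have already reserved
 * enough ring entries (see the req->cnt accounting in the *_start_scsi()
 * routines) and to hold the hardware lock while building the command.
 */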

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
		return 0;
	}

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(sp->cmd);
}
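
/*
 * The return value doubles as the number of protection data segments:
 * qla24xx_dif_start_scsi() below maps scsi_prot_sglist() only when this
 * routine returns non-zero. A return of 0 (unsupported guard type or no
 * protection scatter/gather entries) makes the caller build the CRC_2
 * IOCB without a separate protection segment list.
 */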

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
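
/*
 * Locking: __qla2x00_marker() expects the caller to hold the hardware
 * lock, since it touches the request ring via qla2x00_isp_cmd();
 * qla2x00_marker() is the convenience wrapper that takes and drops the
 * lock itself, so it is the one to call from unlocked contexts.
 */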

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		if (ql2xdbwr)
			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		else {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
				WRT_REG_DWORD((unsigned long __iomem *)
				    ha->nxdb_wr_ptr, dbval);
				wmb();
			}
		}
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
	    __func__, iocbs));
	return iocbs;
}
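
/*
 * Example: for dsds = 11, the Command Type 7 IOCB itself holds the
 * first DSD and the remaining 10 need ceil(10 / 5) = 2 Continuation
 * Type 1 IOCBs, so qla24xx_calc_iocbs(11) returns 1 + 2 = 3 request
 * ring entries.
 */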

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};
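
/*
 * Mask semantics: a 0xff byte in ref_tag_mask/app_tag_mask tells the
 * firmware to check (or replace) the corresponding byte of the tag; an
 * all-zero mask disables the check, which is how the Type 0 and Type 3
 * cases below opt out of reference-tag validation.
 */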

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct sd_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(cmd);

	switch (scsi_get_prot_type(cmd)) {
	/* For TYPE 0 protection: no checking */
	case SCSI_PROT_DIF_TYPE0:
		pkt->ref_tag_mask[0] = 0x00;
		pkt->ref_tag_mask[1] = 0x00;
		pkt->ref_tag_mask[2] = 0x00;
		pkt->ref_tag_mask[3] = 0x00;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		if (!ql2xenablehba_err_chk)
			break;

		if (scsi_prot_sg_count(cmd)) {
			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
			    scsi_prot_sglist(cmd)[0].offset;
			pkt->app_tag = swab32(spt->app_tag);
			pkt->app_tag_mask[0] = 0xff;
			pkt->app_tag_mask[1] = 0xff;
		}

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		if (!ql2xenablehba_err_chk)
			break;

		if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
		    op == SCSI_PROT_WRITE_PASS)) {
			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
			    scsi_prot_sglist(cmd)[0].offset;
			DEBUG18(printk(KERN_DEBUG
			    "%s(): LBA from user %p, lba = 0x%x\n",
			    __func__, spt, (int)spt->ref_tag));
			pkt->ref_tag = swab32(spt->ref_tag);
			pkt->app_tag_mask[0] = 0x0;
			pkt->app_tag_mask[1] = 0x0;
		} else {
			pkt->ref_tag = cpu_to_le32((uint32_t)
			    (0xffffffff & scsi_get_lba(cmd)));
			pkt->app_tag = __constant_cpu_to_le16(0);
			pkt->app_tag_mask[0] = 0x0;
			pkt->app_tag_mask[1] = 0x0;
		}
		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	DEBUG18(printk(KERN_DEBUG
	    "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
	    " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
	    " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
	    (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;

	uint8_t *cp;

	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
		    " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
		    MSD(sle_dma), sg_dma_len(sg)));
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			DEBUG18(printk("%s(): User Data buffer= %p:\n",
			    __func__ , cp));
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
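
/*
 * DSD list layout note: each data segment descriptor is three 32-bit
 * words (address LSD, address MSD, length), i.e. 12 bytes, which is why
 * dsd_list_len above is (avail_dsds + 1) * 12 -- the extra slot leaves
 * room for the chaining triplet that links to the next list, or for the
 * null-terminator triplet written at the very end.
 */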

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd,
    uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint8_t *cp;

	cmd = sp->cmd;
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			DEBUG18(printk(KERN_DEBUG
			    "%s(): %p, sg entry %d - addr =0x%x"
			    " 0x%x, len =%d\n", __func__ , cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
			    __func__ , cp));
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	int sgc;
	uint32_t total_bytes;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;
	char tag[2];

	cmd = sp->cmd;

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
		    __func__, data_bytes));
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	DEBUG18(printk(KERN_DEBUG
	    "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
	    vha->host_no, sp, scsi_get_prot_op(sp->cmd)));

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	tot_prot_dsds = scsi_prot_sg_count(cmd);
	if (!tot_prot_dsds)
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
	    GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
		    __func__));
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
	host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = 0;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
	    " entries %d, data bytes %d, Protection entries %d\n",
	    __func__, vha->host_no, tot_dsds, (tot_dsds - tot_prot_dsds),
	    data_bytes, tot_prot_dsds));

	/* Compute dif len and adjust data len to include protection */
	total_bytes = data_bytes;
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		dif_bytes = (data_bytes / blk_size) * 8;
		total_bytes += dif_bytes;
	}

	if (!ql2xenablehba_err_chk)
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
	    " = 0x%x (%d), data block size = 0x%x (%d)\n", __func__,
	    vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
	    crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
		    __func__, data_bytes));
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	DEBUG18(qla_printk(KERN_INFO, ha,
	    "CMD sent FAILED crc_q error:sp = %p\n", sp));
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
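
/*
 * Sizing example for the DIF math above: a 64 KiB transfer on a disk
 * with 512-byte sectors carries 65536 / 512 = 128 blocks, each with an
 * 8-byte protection tuple, so dif_bytes = 1024 and the fibre channel
 * byte count (fcp_dl) becomes 65536 + 1024 = 66560 bytes.
 */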

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001356
Arun Easibad75002010-05-04 15:01:30 -07001357
1358/**
1359 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1360 * @sp: command to send to the ISP
1361 *
1362 * Returns non-zero if a failure occurred, else zero.
1363 */
1364int
1365qla24xx_dif_start_scsi(srb_t *sp)
1366{
1367 int nseg;
1368 unsigned long flags;
1369 uint32_t *clr_ptr;
1370 uint32_t index;
1371 uint32_t handle;
1372 uint16_t cnt;
1373 uint16_t req_cnt = 0;
1374 uint16_t tot_dsds;
1375 uint16_t tot_prot_dsds;
1376 uint16_t fw_prot_opts = 0;
1377 struct req_que *req = NULL;
1378 struct rsp_que *rsp = NULL;
1379 struct scsi_cmnd *cmd = sp->cmd;
1380 struct scsi_qla_host *vha = sp->fcport->vha;
1381 struct qla_hw_data *ha = vha->hw;
1382 struct cmd_type_crc_2 *cmd_pkt;
1383 uint32_t status = 0;
1384
1385#define QDSS_GOT_Q_SPACE BIT_0
1386
Arun Easi0c470872010-07-23 15:28:38 +05001387 /* Only process protection or >16 cdb in this routine */
1388 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1389 if (cmd->cmd_len <= 16)
1390 return qla24xx_start_scsi(sp);
1391 }
Arun Easibad75002010-05-04 15:01:30 -07001392
1393 /* Setup device pointers. */
1394
1395 qla25xx_set_que(sp, &rsp);
1396 req = vha->req;
1397
1398 /* So we know we haven't pci_map'ed anything yet */
1399 tot_dsds = 0;
1400
1401 /* Send marker if required */
1402 if (vha->marker_needed != 0) {
1403 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1404 QLA_SUCCESS)
1405 return QLA_FUNCTION_FAILED;
1406 vha->marker_needed = 0;
1407 }
1408
1409 /* Acquire ring specific lock */
1410 spin_lock_irqsave(&ha->hardware_lock, flags);
1411
1412 /* Check for room in outstanding command list. */
1413 handle = req->current_outstanding_cmd;
1414 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1415 handle++;
1416 if (handle == MAX_OUTSTANDING_COMMANDS)
1417 handle = 1;
1418 if (!req->outstanding_cmds[handle])
1419 break;
1420 }
1421
1422 if (index == MAX_OUTSTANDING_COMMANDS)
1423 goto queuing_error;
1424
1425 /* Compute number of required data segments */
1426 /* Map the sg table so we have an accurate count of sg entries needed */
1427 if (scsi_sg_count(cmd)) {
1428 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1429 scsi_sg_count(cmd), cmd->sc_data_direction);
1430 if (unlikely(!nseg))
1431 goto queuing_error;
1432 else
1433 sp->flags |= SRB_DMA_VALID;
1434 } else
1435 nseg = 0;
1436
1437 /* number of required data segments */
1438 tot_dsds = nseg;
1439
1440 /* Compute number of required protection segments */
1441 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1442 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1443 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1444 if (unlikely(!nseg))
1445 goto queuing_error;
1446 else
1447 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1448 } else {
1449 nseg = 0;
1450 }
1451
1452 req_cnt = 1;
1453 /* Total Data and protection sg segment(s) */
1454 tot_prot_dsds = nseg;
1455 tot_dsds += nseg;
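	/*
	 * Free-slot math for the circular request ring; the out pointer is
	 * re-read from hardware only when the cached count looks too small.
	 * Worked example: length = 128, ring_index = 120, out pointer
	 * cnt = 10 -> free = 128 - (120 - 10) = 18 entries. The "+ 2"
	 * appears to be headroom so the in pointer never fully catches up
	 * to the out pointer.
	 */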
1456 if (req->cnt < (req_cnt + 2)) {
1457 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1458
1459 if (req->ring_index < cnt)
1460 req->cnt = cnt - req->ring_index;
1461 else
1462 req->cnt = req->length -
1463 (req->ring_index - cnt);
1464 }
1465
1466 if (req->cnt < (req_cnt + 2))
1467 goto queuing_error;
1468
1469 status |= QDSS_GOT_Q_SPACE;
1470
1471 /* Build header part of command packet (excluding the OPCODE). */
1472 req->current_outstanding_cmd = handle;
1473 req->outstanding_cmds[handle] = sp;
1474 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1475 req->cnt -= req_cnt;
1476
1477 /* Fill-in common area */
1478 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1479 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
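	/*
	 * MAKE_HANDLE() is understood to pack the request queue id into the
	 * upper bits of the handle so the completion path can tell which
	 * queue the command was submitted on.
	 */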
1480
1481 clr_ptr = (uint32_t *)cmd_pkt + 2;
1482 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1483
 1484 /* Set NPORT-ID and LUN number */
1485 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1486 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1487 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1488 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1489
1490 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1491 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1492
1493 /* Total Data and protection segment(s) */
1494 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1495
1496 /* Build IOCB segments and adjust for data protection segments */
1497 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1498 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1499 QLA_SUCCESS)
1500 goto queuing_error;
1501
1502 cmd_pkt->entry_count = (uint8_t)req_cnt;
1503 /* Specify response queue number where completion should happen */
1504 cmd_pkt->entry_status = (uint8_t) rsp->id;
1505 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1506 wmb();
1507
1508 /* Adjust ring index. */
1509 req->ring_index++;
1510 if (req->ring_index == req->length) {
1511 req->ring_index = 0;
1512 req->ring_ptr = req->ring;
1513 } else
1514 req->ring_ptr++;
1515
1516 /* Set chip new ring index. */
1517 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1518 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1519
1520 /* Manage unprocessed RIO/ZIO commands in response queue. */
1521 if (vha->flags.process_response_queue &&
1522 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1523 qla24xx_process_response_queue(vha, rsp);
1524
1525 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1526
1527 return QLA_SUCCESS;
1528
1529queuing_error:
1530 if (status & QDSS_GOT_Q_SPACE) {
1531 req->outstanding_cmds[handle] = NULL;
1532 req->cnt += req_cnt;
1533 }
1534 /* Cleanup will be performed by the caller (queuecommand) */
1535
1536 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1537
1538 DEBUG18(qla_printk(KERN_INFO, ha,
1539 "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
1540 return QLA_FUNCTION_FAILED;
1541}
1542
1543
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001544static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001545{
1546 struct scsi_cmnd *cmd = sp->cmd;
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001547 struct qla_hw_data *ha = sp->fcport->vha->hw;
1548 int affinity = cmd->request->cpu;
1549
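	/*
	 * Response queue 0 is the default; queues 1..max_rsp_queues-1 are
	 * handed out per submitting CPU when CPU affinity is enabled.
	 */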
Anirban Chakraborty7163ea82009-08-05 09:18:40 -07001550 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001551 affinity < ha->max_rsp_queues - 1)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001552 *rsp = ha->rsp_q_map[affinity + 1];
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001553 else
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001554 *rsp = ha->rsp_q_map[0];
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001555}
Andrew Vasquezac280b62009-08-20 11:06:05 -07001556
1557/* Generic Control-SRB manipulation functions. */
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001558void *
1559qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001560{
Andrew Vasquezac280b62009-08-20 11:06:05 -07001561 struct qla_hw_data *ha = vha->hw;
1562 struct req_que *req = ha->req_q_map[0];
1563 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1564 uint32_t index, handle;
1565 request_t *pkt;
1566 uint16_t cnt, req_cnt;
1567
1568 pkt = NULL;
1569 req_cnt = 1;
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001570 handle = 0;
1571
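	/*
	 * Callers building an IOCB with no SRB attached (sp == NULL) skip
	 * the outstanding-command bookkeeping entirely.
	 */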
1572 if (!sp)
1573 goto skip_cmd_array;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001574
1575 /* Check for room in outstanding command list. */
1576 handle = req->current_outstanding_cmd;
1577 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1578 handle++;
1579 if (handle == MAX_OUTSTANDING_COMMANDS)
1580 handle = 1;
1581 if (!req->outstanding_cmds[handle])
1582 break;
1583 }
1584 if (index == MAX_OUTSTANDING_COMMANDS)
1585 goto queuing_error;
1586
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001587 /* Prep command array. */
1588 req->current_outstanding_cmd = handle;
1589 req->outstanding_cmds[handle] = sp;
1590 sp->handle = handle;
1591
1592skip_cmd_array:
Andrew Vasquezac280b62009-08-20 11:06:05 -07001593 /* Check for room on request queue. */
1594 if (req->cnt < req_cnt) {
1595 if (ha->mqenable)
1596 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001597 else if (IS_QLA82XX(ha))
1598 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
Andrew Vasquezac280b62009-08-20 11:06:05 -07001599 else if (IS_FWI2_CAPABLE(ha))
1600 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1601 else
1602 cnt = qla2x00_debounce_register(
1603 ISP_REQ_Q_OUT(ha, &reg->isp));
1604
1605 if (req->ring_index < cnt)
1606 req->cnt = cnt - req->ring_index;
1607 else
1608 req->cnt = req->length -
1609 (req->ring_index - cnt);
1610 }
1611 if (req->cnt < req_cnt)
1612 goto queuing_error;
1613
1614 /* Prep packet */
Andrew Vasquezac280b62009-08-20 11:06:05 -07001615 req->cnt -= req_cnt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001616 pkt = req->ring_ptr;
1617 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1618 pkt->entry_count = req_cnt;
1619 pkt->handle = handle;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001620
1621queuing_error:
1622 return pkt;
1623}
1624
1625static void
1626qla2x00_start_iocbs(srb_t *sp)
1627{
1628 struct qla_hw_data *ha = sp->fcport->vha->hw;
1629 struct req_que *req = ha->req_q_map[0];
1630 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1631 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1632
Giridhar Malavalia9083012010-04-12 17:59:55 -07001633 if (IS_QLA82XX(ha)) {
1634 qla82xx_start_iocbs(sp);
Andrew Vasquezac280b62009-08-20 11:06:05 -07001635 } else {
Giridhar Malavalia9083012010-04-12 17:59:55 -07001636 /* Adjust ring index. */
1637 req->ring_index++;
1638 if (req->ring_index == req->length) {
1639 req->ring_index = 0;
1640 req->ring_ptr = req->ring;
1641 } else
1642 req->ring_ptr++;
1643
1644 /* Set chip new ring index. */
1645 if (ha->mqenable) {
1646 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
1647 RD_REG_DWORD(&ioreg->hccr);
1648 } else if (IS_QLA82XX(ha)) {
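			/* Unreachable: IS_QLA82XX() was handled by the outer branch above. */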
1649 qla82xx_start_iocbs(sp);
1650 } else if (IS_FWI2_CAPABLE(ha)) {
1651 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
1652 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
1653 } else {
1654 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
1655 req->ring_index);
1656 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
1657 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07001658 }
1659}
1660
1661static void
1662qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1663{
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001664 struct srb_ctx *ctx = sp->ctx;
1665 struct srb_iocb *lio = ctx->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001666
1667 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1668 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001669 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001670 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001671 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001672 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1673 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1674 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1675 logio->port_id[1] = sp->fcport->d_id.b.area;
1676 logio->port_id[2] = sp->fcport->d_id.b.domain;
1677 logio->vp_index = sp->fcport->vp_idx;
1678}
1679
1680static void
1681qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1682{
1683 struct qla_hw_data *ha = sp->fcport->vha->hw;
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001684 struct srb_ctx *ctx = sp->ctx;
1685 struct srb_iocb *lio = ctx->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001686 uint16_t opts;
1687
Giridhar Malavalib9637522010-05-28 15:08:15 -07001688 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001689 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1690 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001691 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1692 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
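	/*
	 * With extended IDs the full loop id goes in MB1 and the option
	 * bits move to MB10; legacy IDs pack the loop id into the high
	 * byte of MB1 with the options in the low byte.
	 */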
Andrew Vasquezac280b62009-08-20 11:06:05 -07001693 if (HAS_EXTENDED_IDS(ha)) {
1694 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1695 mbx->mb10 = cpu_to_le16(opts);
1696 } else {
1697 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1698 }
1699 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1700 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1701 sp->fcport->d_id.b.al_pa);
1702 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1703}
1704
1705static void
1706qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1707{
1708 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1709 logio->control_flags =
1710 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1711 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1712 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1713 logio->port_id[1] = sp->fcport->d_id.b.area;
1714 logio->port_id[2] = sp->fcport->d_id.b.domain;
1715 logio->vp_index = sp->fcport->vp_idx;
1716}
1717
1718static void
1719qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1720{
1721 struct qla_hw_data *ha = sp->fcport->vha->hw;
1722
Giridhar Malavalib9637522010-05-28 15:08:15 -07001723 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001724 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1725 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1726 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1727 cpu_to_le16(sp->fcport->loop_id):
1728 cpu_to_le16(sp->fcport->loop_id << 8);
1729 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1730 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1731 sp->fcport->d_id.b.al_pa);
1732 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1733 /* Implicit: mbx->mbx10 = 0. */
1734}
1735
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001736static void
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07001737qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1738{
1739 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1740 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1741 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1742 logio->vp_index = sp->fcport->vp_idx;
1743}
1744
1745static void
1746qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1747{
1748 struct qla_hw_data *ha = sp->fcport->vha->hw;
1749
1750 mbx->entry_type = MBX_IOCB_TYPE;
1751 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1752 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1753 if (HAS_EXTENDED_IDS(ha)) {
1754 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1755 mbx->mb10 = cpu_to_le16(BIT_0);
1756 } else {
1757 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1758 }
1759 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1760 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1761 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1762 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1763 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1764}
1765
1766static void
Madhuranath Iyengar38222632010-05-04 15:01:29 -07001767qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1768{
1769 uint32_t flags;
1770 unsigned int lun;
1771 struct fc_port *fcport = sp->fcport;
1772 scsi_qla_host_t *vha = fcport->vha;
1773 struct qla_hw_data *ha = vha->hw;
1774 struct srb_ctx *ctx = sp->ctx;
1775 struct srb_iocb *iocb = ctx->u.iocb_cmd;
1776 struct req_que *req = vha->req;
1777
1778 flags = iocb->u.tmf.flags;
1779 lun = iocb->u.tmf.lun;
1780
1781 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1782 tsk->entry_count = 1;
1783 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1784 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
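	/*
	 * Program a firmware timeout of twice R_A_TOV; this assumes
	 * ha->r_a_tov is kept in tenths of a second, making the result a
	 * value in seconds.
	 */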
1785 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1786 tsk->control_flags = cpu_to_le32(flags);
1787 tsk->port_id[0] = fcport->d_id.b.al_pa;
1788 tsk->port_id[1] = fcport->d_id.b.area;
1789 tsk->port_id[2] = fcport->d_id.b.domain;
1790 tsk->vp_index = fcport->vp_idx;
1791
1792 if (flags == TCF_LUN_RESET) {
1793 int_to_scsilun(lun, &tsk->lun);
1794 host_to_fcp_swap((uint8_t *)&tsk->lun,
1795 sizeof(tsk->lun));
1796 }
1797}
1798
1799static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001800qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1801{
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001802 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001803
1804 els_iocb->entry_type = ELS_IOCB_TYPE;
1805 els_iocb->entry_count = 1;
1806 els_iocb->sys_define = 0;
1807 els_iocb->entry_status = 0;
1808 els_iocb->handle = sp->handle;
1809 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1810 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1811 els_iocb->vp_index = sp->fcport->vp_idx;
1812 els_iocb->sof_type = EST_SOFI3;
1813 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1814
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001815 els_iocb->opcode =
1816 (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
1817 bsg_job->request->rqst_data.r_els.els_code :
1818 bsg_job->request->rqst_data.h_els.command_code;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001819 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1820 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1821 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1822 els_iocb->control_flags = 0;
1823 els_iocb->rx_byte_count =
1824 cpu_to_le32(bsg_job->reply_payload.payload_len);
1825 els_iocb->tx_byte_count =
1826 cpu_to_le32(bsg_job->request_payload.payload_len);
1827
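	/*
	 * Only the first scatter/gather element of each bsg payload is
	 * programmed below, so the request and reply DMA mappings are
	 * assumed to be single-segment.
	 */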
1828 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1829 (bsg_job->request_payload.sg_list)));
1830 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
1831 (bsg_job->request_payload.sg_list)));
1832 els_iocb->tx_len = cpu_to_le32(sg_dma_len
1833 (bsg_job->request_payload.sg_list));
1834
1835 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
1836 (bsg_job->reply_payload.sg_list)));
1837 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
1838 (bsg_job->reply_payload.sg_list)));
1839 els_iocb->rx_len = cpu_to_le32(sg_dma_len
1840 (bsg_job->reply_payload.sg_list));
1841}
1842
1843static void
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05001844qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
1845{
1846 uint16_t avail_dsds;
1847 uint32_t *cur_dsd;
1848 struct scatterlist *sg;
1849 int index;
1850 uint16_t tot_dsds;
1851 scsi_qla_host_t *vha = sp->fcport->vha;
1852 struct qla_hw_data *ha = vha->hw;
1853 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
 1854 int loop_iteration = 0;
1855 int cont_iocb_prsnt = 0;
1856 int entry_count = 1;
1857
1858 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
1859 ct_iocb->entry_type = CT_IOCB_TYPE;
1860 ct_iocb->entry_status = 0;
1861 ct_iocb->handle1 = sp->handle;
1862 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
1863 ct_iocb->status = __constant_cpu_to_le16(0);
1864 ct_iocb->control_flags = __constant_cpu_to_le16(0);
1865 ct_iocb->timeout = 0;
1866 ct_iocb->cmd_dsd_count =
1867 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1868 ct_iocb->total_dsd_count =
1869 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
1870 ct_iocb->req_bytecount =
1871 cpu_to_le32(bsg_job->request_payload.payload_len);
1872 ct_iocb->rsp_bytecount =
1873 cpu_to_le32(bsg_job->reply_payload.payload_len);
1874
1875 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
1876 (bsg_job->request_payload.sg_list)));
1877 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
1878 (bsg_job->request_payload.sg_list)));
1879 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
1880
1881 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
1882 (bsg_job->reply_payload.sg_list)));
1883 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
1884 (bsg_job->reply_payload.sg_list)));
1885 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
1886
1887 avail_dsds = 1;
1888 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
1889 index = 0;
1890 tot_dsds = bsg_job->reply_payload.sg_cnt;
1891
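	/*
	 * The first reply DSD lives in the IOCB itself; each additional
	 * Continuation Type 1 IOCB carries up to five more. Worked example:
	 * an 11-segment reply needs 1 inline DSD plus 2 continuation IOCBs
	 * (5 + 5), giving entry_count = 3.
	 */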
1892 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1893 dma_addr_t sle_dma;
1894 cont_a64_entry_t *cont_pkt;
1895
1896 /* Allocate additional continuation packets? */
1897 if (avail_dsds == 0) {
1898 /*
1899 * Five DSDs are available in the Cont.
1900 * Type 1 IOCB.
1901 */
1902 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1903 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1904 avail_dsds = 5;
1905 cont_iocb_prsnt = 1;
1906 entry_count++;
1907 }
1908
1909 sle_dma = sg_dma_address(sg);
1910 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1911 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1912 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 1913 loop_iteration++;
1914 avail_dsds--;
1915 }
1916 ct_iocb->entry_count = entry_count;
1917}
1918
1919static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001920qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1921{
1922 uint16_t avail_dsds;
1923 uint32_t *cur_dsd;
1924 struct scatterlist *sg;
1925 int index;
1926 uint16_t tot_dsds;
1927 scsi_qla_host_t *vha = sp->fcport->vha;
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001928 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001929 int loop_iteration = 0;
1930 int cont_iocb_prsnt = 0;
1931 int entry_count = 1;
1932
1933 ct_iocb->entry_type = CT_IOCB_TYPE;
1934 ct_iocb->entry_status = 0;
1935 ct_iocb->sys_define = 0;
1936 ct_iocb->handle = sp->handle;
1937
1938 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1939 ct_iocb->vp_index = sp->fcport->vp_idx;
1940 ct_iocb->comp_status = __constant_cpu_to_le16(0);
1941
1942 ct_iocb->cmd_dsd_count =
1943 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1944 ct_iocb->timeout = 0;
1945 ct_iocb->rsp_dsd_count =
1946 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1947 ct_iocb->rsp_byte_count =
1948 cpu_to_le32(bsg_job->reply_payload.payload_len);
1949 ct_iocb->cmd_byte_count =
1950 cpu_to_le32(bsg_job->request_payload.payload_len);
1951 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
1952 (bsg_job->request_payload.sg_list)));
1953 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
1954 (bsg_job->request_payload.sg_list)));
1955 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
1956 (bsg_job->request_payload.sg_list));
1957
1958 avail_dsds = 1;
1959 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
1960 index = 0;
1961 tot_dsds = bsg_job->reply_payload.sg_cnt;
1962
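	/*
	 * Same continuation scheme as qla2x00_ct_iocb() above: one reply
	 * DSD inline, then up to five per Continuation Type 1 IOCB.
	 */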
1963 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1964 dma_addr_t sle_dma;
1965 cont_a64_entry_t *cont_pkt;
1966
1967 /* Allocate additional continuation packets? */
1968 if (avail_dsds == 0) {
1969 /*
1970 * Five DSDs are available in the Cont.
1971 * Type 1 IOCB.
1972 */
1973 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1974 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1975 avail_dsds = 5;
1976 cont_iocb_prsnt = 1;
1977 entry_count++;
1978 }
1979
1980 sle_dma = sg_dma_address(sg);
1981 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1982 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1983 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 1984 loop_iteration++;
1985 avail_dsds--;
1986 }
1987 ct_iocb->entry_count = entry_count;
1988}
1989
Andrew Vasquezac280b62009-08-20 11:06:05 -07001990int
1991qla2x00_start_sp(srb_t *sp)
1992{
1993 int rval;
1994 struct qla_hw_data *ha = sp->fcport->vha->hw;
1995 void *pkt;
1996 struct srb_ctx *ctx = sp->ctx;
1997 unsigned long flags;
1998
1999 rval = QLA_FUNCTION_FAILED;
2000 spin_lock_irqsave(&ha->hardware_lock, flags);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002001 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002002 if (!pkt)
2003 goto done;
2004
2005 rval = QLA_SUCCESS;
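	/*
	 * Dispatch on SRB type: FWI2-capable ISPs (24xx and newer) take the
	 * native IOCB forms, older ISPs take mailbox-entry equivalents.
	 * ELS pass-through and task management only have 24xx variants here.
	 */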
2006 switch (ctx->type) {
2007 case SRB_LOGIN_CMD:
2008 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002009 qla24xx_login_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002010 qla2x00_login_iocb(sp, pkt);
2011 break;
2012 case SRB_LOGOUT_CMD:
2013 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002014 qla24xx_logout_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002015 qla2x00_logout_iocb(sp, pkt);
2016 break;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002017 case SRB_ELS_CMD_RPT:
2018 case SRB_ELS_CMD_HST:
2019 qla24xx_els_iocb(sp, pkt);
2020 break;
2021 case SRB_CT_CMD:
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002022 IS_FWI2_CAPABLE(ha) ?
2023 qla24xx_ct_iocb(sp, pkt) :
2024 qla2x00_ct_iocb(sp, pkt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002025 break;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002026 case SRB_ADISC_CMD:
2027 IS_FWI2_CAPABLE(ha) ?
2028 qla24xx_adisc_iocb(sp, pkt) :
2029 qla2x00_adisc_iocb(sp, pkt);
2030 break;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002031 case SRB_TM_CMD:
2032 qla24xx_tm_iocb(sp, pkt);
2033 break;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002034 default:
2035 break;
2036 }
2037
2038 wmb();
2039 qla2x00_start_iocbs(sp);
2040done:
2041 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2042 return rval;
2043}