/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
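
/*
 * Worked example for the two helpers above: with dsds = 12 on the
 * 32bit path, three DSDs fit in the Command Type 2 entry and the
 * remaining nine need 9 / 7 = 1 full Continuation Type 0 entry plus
 * one more for the remainder, so three IOCBs total. The same twelve
 * DSDs on the 64bit path (two per command entry, five per
 * continuation) also come to 1 + 2 = 3 IOCBs.
 */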

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to place the continuation IOCB on
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
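
/*
 * Note that both continuation helpers above advance the request-ring
 * producer index before writing the packet: the caller is expected to
 * have already reserved enough ring slots (via the req->cnt accounting
 * done in the qla2x00_start_scsi()/qla24xx_start_scsi() paths) for the
 * command IOCB plus every continuation IOCB it will consume.
 */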

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
		return 0;
	}

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(sp->cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
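
/*
 * A 32bit DSD is an (address, length) pair, while a 64bit DSD is an
 * (address-low, address-high, length) triple; that difference is why
 * the 32bit command IOCB holds three DSDs and its continuation seven,
 * but the 64bit variants hold only two and five in the same entry size.
 */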

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
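
/*
 * The free-slot computation above treats the request ring as circular:
 * when the firmware's out pointer (cnt) is ahead of ring_index the free
 * space is simply cnt - ring_index, otherwise it wraps around the ring
 * length. For example, with a 128-entry ring, ring_index = 120 and
 * cnt = 8, req->cnt becomes 128 - (120 - 8) = 16. The extra "+ 2" of
 * head room keeps the ring from filling completely, so a full ring is
 * never mistaken for an empty one.
 */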

/**
 * qla2x00_start_iocbs() - Advance the request ring and notify the ISP
 * that new IOCBs have been queued for execution.
 */
static void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable) {
			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
			RD_REG_DWORD(&ioreg->hccr);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
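
/*
 * For example, dsds = 11 on an ISP24xx yields 1 + (10 / 5) = 3 IOCBs:
 * one DSD rides in the Command Type 7 entry and the remaining ten fill
 * two Continuation Type 1 entries of five DSDs each.
 */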

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->ctx;

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
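
/*
 * This is a ceiling division: each external DSD list holds up to
 * QLA_DSDS_PER_IOCB entries. Assuming QLA_DSDS_PER_IOCB is 37 (its
 * value in qla_def.h), dsds = 40 comes to 40 / 37 = 1 full list plus
 * one more for the three left over, i.e. two lists.
 */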


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = sp->cmd;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	ql_dbg(ql_dbg_io, vha, 0x3009,
	    "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
	    "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
	    pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
	    scsi_get_prot_type(cmd), cmd);
}
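
/*
 * Summary of the tag checking enabled above, per T10 DIF type: Type 0
 * and Type 2 validate only the 32-bit reference tag (all four mask
 * bytes 0xff), Type 1 validates the reference tag while ignoring the
 * application tag, and Type 3 checks the 16-bit guard alone, so every
 * ref tag mask byte stays 0x00.
 */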

struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT */
	uint32_t		dma_len;	/* OUT */

	uint32_t		tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t		bytes_consumed;
	uint32_t		num_bytes;
	uint32_t		tot_partial;

	/* for debugging */
	uint32_t		num_sg;
	srb_t			*sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
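
/*
 * Each call above emits at most one DMA chunk and never crosses a
 * protection-interval (blk_sz, i.e. sector size) boundary: *partial
 * set to 0 means the chunk completes a full interval, while 1 means
 * the interval continues into the next scatterlist element. For
 * example, two 768-byte SG entries with blk_sz = 512 yield chunks of
 * 512 (full), 256 (partial), 256 (full) and 512 (full).
 */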

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint32_t prot_int;
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = sp->cmd;

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(sp->cmd);
	sgx.cur_sg = scsi_sglist(sp->cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(sp->cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
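
/*
 * With DIF bundling off the firmware expects data and protection
 * interleaved on the wire, so after every full sector of data DSDs the
 * loop above jumps back through alloc_and_fill to append one 8-byte
 * DSD pointing at the matching DIF tuple in the protection
 * scatterlist, forcing partial to 1 first so that detour happens
 * exactly once per interval.
 */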

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);

	uint8_t *cp;

	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
		    sp->cmd);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x300b,
			    "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
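
/*
 * All three walk_and_build helpers chain external DSD lists the same
 * way: each 12-byte slot is an (addr-low, addr-high, length) triple,
 * one slot per list is reserved as a link to the next list (hence the
 * "(avail_dsds + 1) * 12" length), and a zeroed triple terminates the
 * chain. The dsd_dma tracking nodes are queued on the CRC context so
 * sp_free_dma() can return them to the DMA pool later.
 */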

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	uint8_t *cp;

	cmd = sp->cmd;
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			ql_dbg(ql_dbg_io, vha, 0x3027,
			    "%s(): %p, sg_entry %d - "
			    "addr=0x%x 0x%x, len=%d.\n",
			    __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x3028,
			    "%s(): Protection Data buffer = %p.\n", __func__,
			    cp);
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments carrying protection data
 * @fw_prot_opts: Protection options forwarded to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	int sgc;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;
	char tag[2];

	cmd = sp->cmd;

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
	    GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = 0;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
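
/*
 * Worked example of the byte accounting above: a 4 KB write with
 * 512-byte sectors carries 8 protection intervals, so dif_bytes =
 * (4096 / 512) * 8 = 64. For WRITE_PASS both data and DIF travel on
 * the wire, giving total_bytes (and fcp_dl) = 4160, while for
 * WRITE_STRIP the wire count stays 4096 and only the host-side
 * transfer grows by dif_bytes.
 */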
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001427
1428/**
1429 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1430 * @sp: command to send to the ISP
1431 *
Bjorn Helgaascc3ef7b2008-09-11 21:22:51 -07001432 * Returns non-zero if a failure occurred, else zero.
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001433 */
1434int
1435qla24xx_start_scsi(srb_t *sp)
1436{
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001437 int ret, nseg;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001438 unsigned long flags;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001439 uint32_t *clr_ptr;
1440 uint32_t index;
1441 uint32_t handle;
1442 struct cmd_type_7 *cmd_pkt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001443 uint16_t cnt;
1444 uint16_t req_cnt;
1445 uint16_t tot_dsds;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001446 struct req_que *req = NULL;
1447 struct rsp_que *rsp = NULL;
1448 struct scsi_cmnd *cmd = sp->cmd;
Andrew Vasquez444786d2009-01-05 11:18:10 -08001449 struct scsi_qla_host *vha = sp->fcport->vha;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001450 struct qla_hw_data *ha = vha->hw;
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001451 char tag[2];
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001452
1453 /* Setup device pointers. */
1454 ret = 0;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001455
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001456 qla25xx_set_que(sp, &rsp);
1457 req = vha->req;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001458
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001459 /* So we know we haven't pci_map'ed anything yet */
1460 tot_dsds = 0;
1461
1462 /* Send marker if required */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001463 if (vha->marker_needed != 0) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001464 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1465 QLA_SUCCESS)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001466 return QLA_FUNCTION_FAILED;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001467 vha->marker_needed = 0;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001468 }
1469
1470 /* Acquire ring specific lock */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001471 spin_lock_irqsave(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001472
1473 /* Check for room in outstanding command list. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001474 handle = req->current_outstanding_cmd;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001475 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1476 handle++;
1477 if (handle == MAX_OUTSTANDING_COMMANDS)
1478 handle = 1;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001479 if (!req->outstanding_cmds[handle])
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001480 break;
1481 }
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001482 if (index == MAX_OUTSTANDING_COMMANDS) {
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001483 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001484 }

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
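	/*
	 * Free-slot math on the circular ring: ring_index is the producer,
	 * cnt (req_q_out) the consumer.  E.g. with length = 2048,
	 * ring_index = 10 and cnt = 5, 2048 - (10 - 5) = 2043 entries are
	 * free.  The "+ 2" in the checks appears to keep a couple of
	 * entries in reserve so the ring never fills completely.
	 */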
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
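	/*
	 * The completion handle carries the request-queue id in its upper
	 * half -- MAKE_HANDLE is, roughly, (req->id << 16) | handle (see
	 * qla_def.h) -- so the ISR can map a completion back to its queue.
	 */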

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/*
	 * Only process protection operations or CDBs longer than 16 bytes
	 * in this routine; everything else takes the standard path.
	 */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

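		/*
		 * The walk below only counts how many sector-sized block
		 * DSDs the transfer will need -- presumably because for
		 * READ_INSERT/WRITE_STRIP the HBA inserts or strips the
		 * protection tuples itself and wants block-aligned
		 * segments.
		 */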
		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}

	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}

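/*
 * Select the response queue a command should complete on.  With CPU
 * affinity enabled, the submitting CPU n maps to rsp_q_map[n + 1];
 * everything else falls back to the default queue, rsp_q_map[0].
 */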
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}

/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;
	struct srb_ctx *ctx;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;
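	/*
	 * With no SRB there is no outstanding-command bookkeeping to do;
	 * the IOCB completes without a per-command callback (markers, for
	 * instance, appear to take this path), so handle stays 0.
	 */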

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->ctx) {
		ctx = sp->ctx;
		req_cnt = ctx->iocbs;
	}

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_QLA82XX(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;

queuing_error:
	return pkt;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	unsigned int lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *iocb = ctx->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
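	/*
	 * The timeout just programmed is 2 * R_A_TOV.  The divide by 10
	 * suggests r_a_tov is tracked in 100 ms units while the IOCB field
	 * takes seconds -- an inference from this usage, not a documented
	 * fact.
	 */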
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}

static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}

static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	reg = &ha->iobase->isp82;
	cmd = sp->cmd;
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

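	/*
	 * Two request formats from here on: transfers with more S/G
	 * entries than ql2xshiftctondsd (a module parameter) go out as
	 * Command Type 6, which chains DSDs through lists in host memory
	 * and ships the FCP_CMND IU from a separate DMA buffer; smaller
	 * transfers use the inline Command Type 7 path in the else branch.
	 */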
	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}

		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!sp->ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}
		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error_fcp_cmnd;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/*
				 * SCSI commands bigger than 16 bytes must be
				 * a multiple of 4 bytes long.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* Build FCP_CMND IU */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
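		/*
		 * FCP_CMND layout as built here: 12 bytes of fixed header,
		 * the CDB (16 bytes plus additional_cdb_len), then this
		 * 4-byte FCP_DL transfer length -- which is why
		 * ctx->fcp_cmnd_len was sized as 12 + cmd_len + 4 above.
		 */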

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/*
		 * Specify response queue number where completion should
		 * happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vp_idx;

		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/*
		 * Specify response queue number where completion should
		 * happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
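	/*
	 * ISP82xx doorbell value: 0x04 | (portnum << 5), OR'd below with
	 * (req->id << 8) | (ring_index << 16).  The write-then-read-back
	 * loop appears to guard against the doorbell write being dropped;
	 * with ql2xdbwr set, a single qla82xx_wr_32() is trusted instead.
	 */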
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->ctx) {
		mempool_free(sp->ctx, ha->ctx_mempool);
		sp->ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

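/**
 * qla2x00_start_sp() - Dispatch a control SRB (login/logout/ELS/CT/ADISC/TM).
 * @sp: SRB to issue
 *
 * Allocates IOCB space under the hardware lock, builds the IOCB for the
 * SRB's context type, and rings the request-queue doorbell via
 * qla2x00_start_iocbs().
 *
 * A minimal caller sketch (assuming an initialized srb_t with a populated
 * srb_ctx, as qla2x00_async_login() sets up):
 *
 *	sp->ctx->type = SRB_LOGIN_CMD;
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		release_sp(vha, sp);	// hypothetical cleanup helper
 *
 * Returns non-zero if a failure occurred, else zero.
 */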
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (ctx->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		qla24xx_tm_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}