blob: 518fccfbfdb227d4efc1252f518f697407045cb6 [file] [log] [blame]
Giridhar Malavali6e980162010-03-19 17:03:58 -07001/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/delay.h>
12
13/* BSG support for ELS/CT pass through */
14inline srb_t *
15qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
16{
17 srb_t *sp;
18 struct qla_hw_data *ha = vha->hw;
Madhuranath Iyengar49163922010-05-04 15:01:28 -070019 struct srb_ctx *ctx;
Giridhar Malavali6e980162010-03-19 17:03:58 -070020
21 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22 if (!sp)
23 goto done;
24 ctx = kzalloc(size, GFP_KERNEL);
25 if (!ctx) {
26 mempool_free(sp, ha->srb_mempool);
27 sp = NULL;
28 goto done;
29 }
30
31 memset(sp, 0, sizeof(*sp));
32 sp->fcport = fcport;
33 sp->ctx = ctx;
34done:
35 return sp;
36}
37
Sarang Radke09ff7012010-03-19 17:03:59 -070038int
39qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
40{
41 int i, ret, num_valid;
42 uint8_t *bcode;
43 struct qla_fcp_prio_entry *pri_entry;
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050044 uint32_t *bcode_val_ptr, bcode_val;
Sarang Radke09ff7012010-03-19 17:03:59 -070045
46 ret = 1;
47 num_valid = 0;
48 bcode = (uint8_t *)pri_cfg;
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050049 bcode_val_ptr = (uint32_t *)pri_cfg;
50 bcode_val = (uint32_t)(*bcode_val_ptr);
Sarang Radke09ff7012010-03-19 17:03:59 -070051
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050052 if (bcode_val == 0xFFFFFFFF) {
53 /* No FCP Priority config data in flash */
54 DEBUG2(printk(KERN_INFO
55 "%s: No FCP priority config data.\n",
56 __func__));
57 return 0;
58 }
59
60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
61 bcode[3] != 'S') {
62 /* Invalid FCP priority data header*/
63 DEBUG2(printk(KERN_ERR
64 "%s: Invalid FCP Priority data header. bcode=0x%x\n",
65 __func__, bcode_val));
Sarang Radke09ff7012010-03-19 17:03:59 -070066 return 0;
67 }
68 if (flag != 1)
69 return ret;
70
71 pri_entry = &pri_cfg->entry[0];
72 for (i = 0; i < pri_cfg->num_entries; i++) {
73 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
74 num_valid++;
75 pri_entry++;
76 }
77
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050078 if (num_valid == 0) {
79 /* No valid FCP priority data entries */
80 DEBUG2(printk(KERN_ERR
81 "%s: No valid FCP Priority data entries.\n",
82 __func__));
Sarang Radke09ff7012010-03-19 17:03:59 -070083 ret = 0;
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050084 } else {
85 /* FCP priority data is valid */
86 DEBUG2(printk(KERN_INFO
87 "%s: Valid FCP priority data. num entries = %d\n",
88 __func__, num_valid));
89 }
Sarang Radke09ff7012010-03-19 17:03:59 -070090
91 return ret;
92}
93
94static int
95qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
96{
97 struct Scsi_Host *host = bsg_job->shost;
98 scsi_qla_host_t *vha = shost_priv(host);
99 struct qla_hw_data *ha = vha->hw;
100 int ret = 0;
101 uint32_t len;
102 uint32_t oper;
103
104 bsg_job->reply->reply_payload_rcv_len = 0;
105
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +0500106 if (!IS_QLA24XX_TYPE(ha) || !IS_QLA25XX(ha)) {
107 ret = -EINVAL;
108 goto exit_fcp_prio_cfg;
109 }
110
Sarang Radke09ff7012010-03-19 17:03:59 -0700111 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
112 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
113 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
114 ret = -EBUSY;
115 goto exit_fcp_prio_cfg;
116 }
117
118 /* Get the sub command */
119 oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
120
121 /* Only set config is allowed if config memory is not allocated */
122 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
123 ret = -EINVAL;
124 goto exit_fcp_prio_cfg;
125 }
126 switch (oper) {
127 case QLFC_FCP_PRIO_DISABLE:
128 if (ha->flags.fcp_prio_enabled) {
129 ha->flags.fcp_prio_enabled = 0;
130 ha->fcp_prio_cfg->attributes &=
131 ~FCP_PRIO_ATTR_ENABLE;
132 qla24xx_update_all_fcp_prio(vha);
133 bsg_job->reply->result = DID_OK;
134 } else {
135 ret = -EINVAL;
136 bsg_job->reply->result = (DID_ERROR << 16);
137 goto exit_fcp_prio_cfg;
138 }
139 break;
140
141 case QLFC_FCP_PRIO_ENABLE:
142 if (!ha->flags.fcp_prio_enabled) {
143 if (ha->fcp_prio_cfg) {
144 ha->flags.fcp_prio_enabled = 1;
145 ha->fcp_prio_cfg->attributes |=
146 FCP_PRIO_ATTR_ENABLE;
147 qla24xx_update_all_fcp_prio(vha);
148 bsg_job->reply->result = DID_OK;
149 } else {
150 ret = -EINVAL;
151 bsg_job->reply->result = (DID_ERROR << 16);
152 goto exit_fcp_prio_cfg;
153 }
154 }
155 break;
156
157 case QLFC_FCP_PRIO_GET_CONFIG:
158 len = bsg_job->reply_payload.payload_len;
159 if (!len || len > FCP_PRIO_CFG_SIZE) {
160 ret = -EINVAL;
161 bsg_job->reply->result = (DID_ERROR << 16);
162 goto exit_fcp_prio_cfg;
163 }
164
165 bsg_job->reply->result = DID_OK;
166 bsg_job->reply->reply_payload_rcv_len =
167 sg_copy_from_buffer(
168 bsg_job->reply_payload.sg_list,
169 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
170 len);
171
172 break;
173
174 case QLFC_FCP_PRIO_SET_CONFIG:
175 len = bsg_job->request_payload.payload_len;
176 if (!len || len > FCP_PRIO_CFG_SIZE) {
177 bsg_job->reply->result = (DID_ERROR << 16);
178 ret = -EINVAL;
179 goto exit_fcp_prio_cfg;
180 }
181
182 if (!ha->fcp_prio_cfg) {
183 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
184 if (!ha->fcp_prio_cfg) {
185 qla_printk(KERN_WARNING, ha,
186 "Unable to allocate memory "
187 "for fcp prio config data (%x).\n",
188 FCP_PRIO_CFG_SIZE);
189 bsg_job->reply->result = (DID_ERROR << 16);
190 ret = -ENOMEM;
191 goto exit_fcp_prio_cfg;
192 }
193 }
194
195 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
196 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
197 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
198 FCP_PRIO_CFG_SIZE);
199
200 /* validate fcp priority data */
201 if (!qla24xx_fcp_prio_cfg_valid(
202 (struct qla_fcp_prio_cfg *)
203 ha->fcp_prio_cfg, 1)) {
204 bsg_job->reply->result = (DID_ERROR << 16);
205 ret = -EINVAL;
206 /* If buffer was invalidatic int
207 * fcp_prio_cfg is of no use
208 */
209 vfree(ha->fcp_prio_cfg);
210 ha->fcp_prio_cfg = NULL;
211 goto exit_fcp_prio_cfg;
212 }
213
214 ha->flags.fcp_prio_enabled = 0;
215 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
216 ha->flags.fcp_prio_enabled = 1;
217 qla24xx_update_all_fcp_prio(vha);
218 bsg_job->reply->result = DID_OK;
219 break;
220 default:
221 ret = -EINVAL;
222 break;
223 }
224exit_fcp_prio_cfg:
225 bsg_job->job_done(bsg_job);
226 return ret;
227}
/*
 * qla2x00_process_els() - issue an ELS pass-through BSG request.
 *
 * Handles both rport-based ELS (FC_BSG_RPT_ELS, after ensuring the
 * rport is logged in) and host-based ELS (FC_BSG_HST_ELS_NOLOGIN,
 * using a dummy fcport built from the request's port_id).  DMA-maps
 * the request/reply scatterlists, allocates an SRB context and hands
 * the command to qla2x00_start_sp().  Returns 0 on successful submit
 * (completion is asynchronous) or a negative errno.
 */
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
	struct fc_rport *rport;
	fc_port_t *fcport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;
	struct srb_ctx *els;

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		DEBUG2(printk(KERN_INFO
		    "multiple SG's are not supported for ELS requests"
		    " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt));
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";

		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			DEBUG2(qla_printk(KERN_WARNING, ha,
			    "failed to login port %06X for ELS passthru\n",
			    fcport->d_id.b24));
			rval = -EIO;
			goto done;
		}
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";

		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->vp_idx = vha->vp_idx;
		/* port_id[] is stored MSB-first: [0]=al_pa slot per the
		 * h_els layout used here; 0xFD addresses the fabric
		 * controller, anything else the F-port.
		 */
		fcport->d_id.b.al_pa =
		    bsg_job->request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_job->request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_job->request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "host not online\n"));
		rval = -EIO;
		goto done;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		/* NOTE(review): the request scatterlist mapped above is
		 * not unmapped on this path (goto skips done_unmap_sg) -
		 * looks like a mapping leak; confirm against later
		 * upstream fixes before relying on this path.
		 */
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts \
		    [request_sg_cnt: %x dma_request_sg_cnt: %x\
		    reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	els = sp->ctx;
	els->type =
	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	els->name =
	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
	    "bsg_els_rpt" : "bsg_els_hst");
	els->u.bsg_job = bsg_job;

	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
	    bsg_job->request->rqst_data.h_els.command_code,
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Submission failed: release the SRB and its context. */
		kfree(sp->ctx);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_unmap_sg;
	}
	/* Success: mappings and fcport are released on completion. */
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	/* Only the dummy fcport allocated above is ours to free. */
	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
		kfree(fcport);
done:
	return rval;
}
384
/*
 * qla2x00_process_ct() - issue a CT (Common Transport) pass-through
 * BSG request to the fabric name server or management server.
 *
 * Requires an FWI2-capable (4Gb+) ISP.  Maps the request/reply
 * scatterlists, selects the loop id from preamble_word1 (0xFC ->
 * name server, 0xFA -> management server), builds a dummy fcport and
 * submits an SRB via qla2x00_start_sp().  Returns 0 on successful
 * submit or a negative errno.
 */
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
	srb_t *sp;
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";
	struct srb_ctx *ct;

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld):Firmware is not capable to support FC "
		    "CT pass thru\n", vha->host_no));
		rval = -EPERM;
		goto done;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		/* NOTE(review): the request scatterlist mapped above is
		 * not unmapped on this path (goto done skips the unmap) -
		 * looks like a mapping leak; verify against later
		 * upstream revisions.
		 */
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x\
		    reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "host not online\n"));
		rval = -EIO;
		goto done_unmap_sg;
	}

	/* Target well-known address lives in the top byte of
	 * preamble_word1.
	 */
	loop_id =
	    (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Unknown loop id: %x\n", loop_id));
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->vp_idx = vha->vp_idx;
	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
	if (!sp) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	ct = sp->ctx;
	ct->type = SRB_CT_CMD;
	ct->name = "bsg_ct";
	ct->u.bsg_job = bsg_job;

	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Submission failed: release the SRB and its context. */
		kfree(sp->ctx);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	/* Success: resources are released on command completion. */
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}
515
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700516/* Set the port configuration to enable the
517 * internal loopback on ISP81XX
518 */
519static inline int
520qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
521 uint16_t *new_config)
522{
523 int ret = 0;
524 int rval = 0;
525 struct qla_hw_data *ha = vha->hw;
526
527 if (!IS_QLA81XX(ha))
528 goto done_set_internal;
529
530 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
531 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
532
533 ha->notify_dcbx_comp = 1;
534 ret = qla81xx_set_port_config(vha, new_config);
535 if (ret != QLA_SUCCESS) {
536 DEBUG2(printk(KERN_ERR
537 "%s(%lu): Set port config failed\n",
538 __func__, vha->host_no));
539 ha->notify_dcbx_comp = 0;
540 rval = -EINVAL;
541 goto done_set_internal;
542 }
543
544 /* Wait for DCBX complete event */
545 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
546 DEBUG2(qla_printk(KERN_WARNING, ha,
547 "State change notificaition not received.\n"));
548 } else
549 DEBUG2(qla_printk(KERN_INFO, ha,
550 "State change RECEIVED\n"));
551
552 ha->notify_dcbx_comp = 0;
553
554done_set_internal:
555 return rval;
556}
557
/* Set the port configuration to disable the
 * internal loopback on ISP81XX
 *
 * If internal loopback is currently enabled in @config, reprogram the
 * port with the loopback bit cleared.  When @wait is non-zero, block
 * up to 20s for the DCBX completion event and treat a timeout as an
 * error (-EINVAL); with @wait == 0 no event is awaited.  Returns 0 on
 * success or when nothing needed to change; no-op on non-ISP81XX.
 */
static inline int
qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
    int wait)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		goto done_reset_internal;

	memset(new_config, 0 , sizeof(new_config));
	/* Only act if the loopback bit is currently set. */
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;

		/* Arm the DCBX notification only when asked to wait. */
		ha->notify_dcbx_comp = wait;
		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			DEBUG2(printk(KERN_ERR
			    "%s(%lu): Set port config failed\n",
			    __func__, vha->host_no));
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (20 * HZ))) {
			DEBUG2(qla_printk(KERN_WARNING, ha,
			    "State change notificaition not received.\n"));
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "State change RECEIVED\n"));

		ha->notify_dcbx_comp = 0;
	}
done_reset_internal:
	return rval;
}
607
Giridhar Malavali6e980162010-03-19 17:03:58 -0700608static int
609qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
610{
611 struct Scsi_Host *host = bsg_job->shost;
612 scsi_qla_host_t *vha = shost_priv(host);
613 struct qla_hw_data *ha = vha->hw;
614 int rval;
615 uint8_t command_sent;
616 char *type;
617 struct msg_echo_lb elreq;
618 uint16_t response[MAILBOX_REGISTER_COUNT];
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700619 uint16_t config[4], new_config[4];
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700620 uint8_t *fw_sts_ptr;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700621 uint8_t *req_data = NULL;
622 dma_addr_t req_data_dma;
623 uint32_t req_data_len;
624 uint8_t *rsp_data = NULL;
625 dma_addr_t rsp_data_dma;
626 uint32_t rsp_data_len;
627
628 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
629 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
630 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
631 return -EBUSY;
632
633 if (!vha->flags.online) {
634 DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
635 return -EIO;
636 }
637
638 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
639 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
640 DMA_TO_DEVICE);
641
642 if (!elreq.req_sg_cnt)
643 return -ENOMEM;
644
645 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
646 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
647 DMA_FROM_DEVICE);
648
649 if (!elreq.rsp_sg_cnt) {
650 rval = -ENOMEM;
651 goto done_unmap_req_sg;
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700652 }
Giridhar Malavali6e980162010-03-19 17:03:58 -0700653
654 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
655 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
656 DEBUG2(printk(KERN_INFO
657 "dma mapping resulted in different sg counts "
658 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
659 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
660 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
661 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
662 rval = -EAGAIN;
663 goto done_unmap_sg;
664 }
665 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
666 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
667 &req_data_dma, GFP_KERNEL);
668 if (!req_data) {
669 DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
670 "failed for host=%lu\n", __func__, vha->host_no));
671 rval = -ENOMEM;
672 goto done_unmap_sg;
673 }
674
675 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
676 &rsp_data_dma, GFP_KERNEL);
677 if (!rsp_data) {
678 DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
679 "failed for host=%lu\n", __func__, vha->host_no));
680 rval = -ENOMEM;
681 goto done_free_dma_req;
682 }
683
684 /* Copy the request buffer in req_data now */
685 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
686 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
687
688 elreq.send_dma = req_data_dma;
689 elreq.rcv_dma = rsp_data_dma;
690 elreq.transfer_size = req_data_len;
691
692 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
693
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700694 if ((ha->current_topology == ISP_CFG_F ||
695 (IS_QLA81XX(ha) &&
696 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
697 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
698 elreq.options == EXTERNAL_LOOPBACK) {
Giridhar Malavali6e980162010-03-19 17:03:58 -0700699 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
700 DEBUG2(qla_printk(KERN_INFO, ha,
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700701 "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
Giridhar Malavali6e980162010-03-19 17:03:58 -0700702 command_sent = INT_DEF_LB_ECHO_CMD;
703 rval = qla2x00_echo_test(vha, &elreq, response);
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700704 } else {
705 if (IS_QLA81XX(ha)) {
706 memset(config, 0, sizeof(config));
707 memset(new_config, 0, sizeof(new_config));
708 if (qla81xx_get_port_config(vha, config)) {
709 DEBUG2(printk(KERN_ERR
710 "%s(%lu): Get port config failed\n",
711 __func__, vha->host_no));
712 bsg_job->reply->reply_payload_rcv_len = 0;
713 bsg_job->reply->result = (DID_ERROR << 16);
714 rval = -EPERM;
715 goto done_free_dma_req;
716 }
717
718 if (elreq.options != EXTERNAL_LOOPBACK) {
719 DEBUG2(qla_printk(KERN_INFO, ha,
720 "Internal: current port config = %x\n",
721 config[0]));
722 if (qla81xx_set_internal_loopback(vha, config,
723 new_config)) {
724 bsg_job->reply->reply_payload_rcv_len =
725 0;
726 bsg_job->reply->result =
727 (DID_ERROR << 16);
728 rval = -EPERM;
729 goto done_free_dma_req;
730 }
731 } else {
732 /* For external loopback to work
733 * ensure internal loopback is disabled
734 */
735 if (qla81xx_reset_internal_loopback(vha,
736 config, 1)) {
737 bsg_job->reply->reply_payload_rcv_len =
738 0;
739 bsg_job->reply->result =
740 (DID_ERROR << 16);
741 rval = -EPERM;
742 goto done_free_dma_req;
743 }
744 }
745
746 type = "FC_BSG_HST_VENDOR_LOOPBACK";
747 DEBUG2(qla_printk(KERN_INFO, ha,
748 "scsi(%ld) bsg rqst type: %s\n",
749 vha->host_no, type));
750
751 command_sent = INT_DEF_LB_LOOPBACK_CMD;
752 rval = qla2x00_loopback_test(vha, &elreq, response);
753
754 if (new_config[1]) {
755 /* Revert back to original port config
756 * Also clear internal loopback
757 */
758 qla81xx_reset_internal_loopback(vha,
759 new_config, 0);
760 }
761
762 if (response[0] == MBS_COMMAND_ERROR &&
763 response[1] == MBS_LB_RESET) {
764 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
765 "ISP\n", __func__, vha->host_no));
766 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
767 qla2xxx_wake_dpc(vha);
768 qla2x00_wait_for_chip_reset(vha);
769 /* Also reset the MPI */
770 if (qla81xx_restart_mpi_firmware(vha) !=
771 QLA_SUCCESS) {
772 qla_printk(KERN_INFO, ha,
773 "MPI reset failed for host%ld.\n",
774 vha->host_no);
775 }
776
777 bsg_job->reply->reply_payload_rcv_len = 0;
778 bsg_job->reply->result = (DID_ERROR << 16);
779 rval = -EIO;
780 goto done_free_dma_req;
781 }
782 } else {
783 type = "FC_BSG_HST_VENDOR_LOOPBACK";
784 DEBUG2(qla_printk(KERN_INFO, ha,
785 "scsi(%ld) bsg rqst type: %s\n",
786 vha->host_no, type));
787 command_sent = INT_DEF_LB_LOOPBACK_CMD;
788 rval = qla2x00_loopback_test(vha, &elreq, response);
789 }
Giridhar Malavali6e980162010-03-19 17:03:58 -0700790 }
791
792 if (rval) {
793 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700794 "request %s failed\n", vha->host_no, type));
Giridhar Malavali6e980162010-03-19 17:03:58 -0700795
796 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700797 sizeof(struct fc_bsg_reply);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700798
799 memcpy(fw_sts_ptr, response, sizeof(response));
800 fw_sts_ptr += sizeof(response);
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700801 *fw_sts_ptr = command_sent;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700802 rval = 0;
803 bsg_job->reply->reply_payload_rcv_len = 0;
804 bsg_job->reply->result = (DID_ERROR << 16);
805 } else {
806 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
807 "request %s completed\n", vha->host_no, type));
808
809 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
810 sizeof(response) + sizeof(uint8_t);
811 bsg_job->reply->reply_payload_rcv_len =
812 bsg_job->reply_payload.payload_len;
813 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
814 sizeof(struct fc_bsg_reply);
815 memcpy(fw_sts_ptr, response, sizeof(response));
816 fw_sts_ptr += sizeof(response);
817 *fw_sts_ptr = command_sent;
818 bsg_job->reply->result = DID_OK;
819 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
820 bsg_job->reply_payload.sg_cnt, rsp_data,
821 rsp_data_len);
822 }
823 bsg_job->job_done(bsg_job);
824
825 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
826 rsp_data, rsp_data_dma);
827done_free_dma_req:
828 dma_free_coherent(&ha->pdev->dev, req_data_len,
829 req_data, req_data_dma);
830done_unmap_sg:
831 dma_unmap_sg(&ha->pdev->dev,
832 bsg_job->reply_payload.sg_list,
833 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
834done_unmap_req_sg:
835 dma_unmap_sg(&ha->pdev->dev,
836 bsg_job->request_payload.sg_list,
837 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700838 return rval;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700839}
840
841static int
842qla84xx_reset(struct fc_bsg_job *bsg_job)
843{
844 struct Scsi_Host *host = bsg_job->shost;
845 scsi_qla_host_t *vha = shost_priv(host);
846 struct qla_hw_data *ha = vha->hw;
847 int rval = 0;
848 uint32_t flag;
849
850 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
851 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
852 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
853 return -EBUSY;
854
855 if (!IS_QLA84XX(ha)) {
856 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
857 "exiting.\n", vha->host_no));
858 return -EINVAL;
859 }
860
861 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
862
863 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
864
865 if (rval) {
866 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
867 "request 84xx reset failed\n", vha->host_no));
868 rval = bsg_job->reply->reply_payload_rcv_len = 0;
869 bsg_job->reply->result = (DID_ERROR << 16);
870
871 } else {
872 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
873 "request 84xx reset completed\n", vha->host_no));
874 bsg_job->reply->result = DID_OK;
875 }
876
877 bsg_job->job_done(bsg_job);
878 return rval;
879}
880
/*
 * qla84xx_updatefw() - handle the vendor BSG "update ISP84xx
 * firmware" command.
 *
 * Copies the firmware image from the request scatterlist into a DMA
 * coherent buffer and issues a VERIFY_CHIP IOCB (force-update; with
 * VCO_DIAG_FW when the sub-flag requests the diagnostic image).
 * Completes the BSG job with DID_OK / DID_ERROR and returns 0 or a
 * negative errno / IOCB failure code.
 */
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	/* Refuse while an ISP abort is pending, active, or retrying. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
		    "exiting.\n", vha->host_no));
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt)
		return -ENOMEM;

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x ",
		    bsg_job->request_payload.sg_cnt, sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	/* Firmware version word lives at dword offset 2 of the image. */
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	/* NOTE(review): mn is a verify_chip_entry_84xx but is cleared
	 * with sizeof(struct access_chip_84xx) - presumably the two
	 * IOCB layouts share one size; confirm in qla_def.h.
	 */
	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	/* Issue the IOCB with a generous 120s timeout for the flash. */
	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx updatefw failed\n", vha->host_no));

		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx updatefw completed\n", vha->host_no));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	return rval;
}
992
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;	/* IOCB built in coherent DMA memory */
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;			/* bounce buffer for the data phase */
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	/* Remembers which payload was DMA-mapped so cleanup unmaps the right one. */
	uint32_t dma_direction = DMA_NONE;

	/*
	 * Vendor command handler for ISP84xx management operations
	 * (read/write chip memory, get info, change config) issued through
	 * the FC BSG interface.  Builds an ACCESS_CHIP IOCB, stages the data
	 * through a coherent bounce buffer, and completes the bsg job inline.
	 * Returns 0 on success or a negative errno on setup failure.
	 */

	/* Refuse new management traffic while the ISP is being reset. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
		    "exiting.\n", vha->host_no));
		return -EINVAL;
	}

	/*
	 * The caller-supplied management header sits immediately after the
	 * generic fc_bsg_request in the request buffer.
	 * NOTE(review): pointer arithmetic on a valid request can never
	 * yield NULL, so the check below looks vestigial — confirm upstream.
	 */
	ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
		sizeof(struct fc_bsg_request));
	if (!ql84_mgmt) {
		DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
		    __func__, vha->host_no));
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
		    "failed for host=%lu\n", __func__, vha->host_no));
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	/* Per-subcommand setup: map the proper payload and fill IOCB options. */
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Data flows chip -> host: map the reply payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		/* Driver assumes a 1:1 mapping; coalesced sg lists unsupported. */
		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			DEBUG2(printk(KERN_INFO
			    "dma mapping resulted in different sg counts "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt));
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		/* Contiguous bounce buffer the firmware DMAs into. */
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
			    "failed for host=%lu\n",
			    __func__, vha->host_no));
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Data flows host -> chip: map the request payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			DEBUG2(printk(KERN_INFO
			    "dma mapping resulted in different sg counts "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x ",
			    bsg_job->request_payload.sg_cnt, sg_cnt));
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
			    "failed for host=%lu\n",
			    __func__, vha->host_no));
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		/* Stage the user payload into the bounce buffer for the chip. */
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* Pure parameter command: no payload, no DMA mapping needed. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/*
	 * All data-carrying subcommands describe a single data segment
	 * (the bounce buffer).  NOTE(review): length comes from the
	 * caller-supplied mgmt.len while the buffer was sized from
	 * payload_len — presumably callers keep these equal; verify.
	 */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	/* Synchronously submit the IOCB via the mailbox interface. */
	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx mgmt failed\n", vha->host_no));

		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx mgmt completed\n", vha->host_no));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		/* For chip->host commands, copy the bounce buffer back out. */
		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

	/* Job is completed inline regardless of IOCB success/failure. */
	bsg_job->job_done(bsg_job);

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	/* Unmap whichever payload this subcommand mapped (if any). */
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	return rval;
}
1199
1200static int
1201qla24xx_iidma(struct fc_bsg_job *bsg_job)
1202{
1203 struct Scsi_Host *host = bsg_job->shost;
1204 scsi_qla_host_t *vha = shost_priv(host);
1205 struct qla_hw_data *ha = vha->hw;
1206 int rval = 0;
1207 struct qla_port_param *port_param = NULL;
1208 fc_port_t *fcport = NULL;
1209 uint16_t mb[MAILBOX_REGISTER_COUNT];
1210 uint8_t *rsp_ptr = NULL;
1211
1212 bsg_job->reply->reply_payload_rcv_len = 0;
1213
1214 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1215 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1216 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
1217 return -EBUSY;
1218
1219 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1220 DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
1221 "supported\n", __func__, vha->host_no));
1222 return -EINVAL;
1223 }
1224
1225 port_param = (struct qla_port_param *)((char *)bsg_job->request +
1226 sizeof(struct fc_bsg_request));
1227 if (!port_param) {
1228 DEBUG2(printk("%s(%ld): port_param header not provided, "
1229 "exiting.\n", __func__, vha->host_no));
1230 return -EINVAL;
1231 }
1232
1233 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1234 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
1235 __func__, vha->host_no));
1236 return -EINVAL;
1237 }
1238
1239 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1240 if (fcport->port_type != FCT_TARGET)
1241 continue;
1242
1243 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1244 fcport->port_name, sizeof(fcport->port_name)))
1245 continue;
1246 break;
1247 }
1248
1249 if (!fcport) {
1250 DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
1251 __func__, vha->host_no));
1252 return -EINVAL;
1253 }
1254
Madhuranath Iyengar17cf2c52010-07-23 15:28:22 +05001255 if (fcport->loop_id == FC_NO_LOOP_ID) {
1256 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid port loop id, "
1257 "loop_id = 0x%x\n",
1258 __func__, vha->host_no, fcport->loop_id));
1259 return -EINVAL;
1260 }
1261
Madhuranath Iyengar9a15eb42010-07-23 15:28:31 +05001262 if (fcport->flags & FCF_LOGIN_NEEDED) {
1263 DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, "
1264 "flags = 0x%x\n",
1265 __func__, vha->host_no, fcport->flags));
1266 return -EINVAL;
1267 }
1268
Giridhar Malavali6e980162010-03-19 17:03:58 -07001269 if (port_param->mode)
1270 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1271 port_param->speed, mb);
1272 else
1273 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1274 &port_param->speed, mb);
1275
1276 if (rval) {
1277 DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
Andrew Vasquez6c452a42010-03-19 17:04:02 -07001278 "%02x%02x%02x%02x%02x%02x%02x%02x -- "
1279 "%04x %x %04x %04x.\n",
Giridhar Malavali6e980162010-03-19 17:03:58 -07001280 vha->host_no, fcport->port_name[0],
1281 fcport->port_name[1],
1282 fcport->port_name[2], fcport->port_name[3],
1283 fcport->port_name[4], fcport->port_name[5],
1284 fcport->port_name[6], fcport->port_name[7], rval,
1285 fcport->fp_speed, mb[0], mb[1]));
1286 rval = 0;
1287 bsg_job->reply->result = (DID_ERROR << 16);
1288
1289 } else {
1290 if (!port_param->mode) {
1291 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1292 sizeof(struct qla_port_param);
1293
1294 rsp_ptr = ((uint8_t *)bsg_job->reply) +
1295 sizeof(struct fc_bsg_reply);
1296
1297 memcpy(rsp_ptr, port_param,
1298 sizeof(struct qla_port_param));
1299 }
1300
1301 bsg_job->reply->result = DID_OK;
1302 }
1303
1304 bsg_job->job_done(bsg_job);
1305 return rval;
1306}
1307
1308static int
1309qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1310{
1311 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1312 case QL_VND_LOOPBACK:
1313 return qla2x00_process_loopback(bsg_job);
1314
1315 case QL_VND_A84_RESET:
1316 return qla84xx_reset(bsg_job);
1317
1318 case QL_VND_A84_UPDATE_FW:
1319 return qla84xx_updatefw(bsg_job);
1320
1321 case QL_VND_A84_MGMT_CMD:
1322 return qla84xx_mgmt_cmd(bsg_job);
1323
1324 case QL_VND_IIDMA:
1325 return qla24xx_iidma(bsg_job);
1326
Sarang Radke09ff7012010-03-19 17:03:59 -07001327 case QL_VND_FCP_PRIO_CFG_CMD:
1328 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1329
Giridhar Malavali6e980162010-03-19 17:03:58 -07001330 default:
1331 bsg_job->reply->result = (DID_ERROR << 16);
1332 bsg_job->job_done(bsg_job);
1333 return -ENOSYS;
1334 }
1335}
1336
1337int
1338qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1339{
1340 int ret = -EINVAL;
1341
1342 switch (bsg_job->request->msgcode) {
1343 case FC_BSG_RPT_ELS:
1344 case FC_BSG_HST_ELS_NOLOGIN:
1345 ret = qla2x00_process_els(bsg_job);
1346 break;
1347 case FC_BSG_HST_CT:
1348 ret = qla2x00_process_ct(bsg_job);
1349 break;
1350 case FC_BSG_HST_VENDOR:
1351 ret = qla2x00_process_vendor_specific(bsg_job);
1352 break;
1353 case FC_BSG_HST_ADD_RPORT:
1354 case FC_BSG_HST_DEL_RPORT:
1355 case FC_BSG_RPT_CT:
1356 default:
1357 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
1358 break;
Andrew Vasquez6c452a42010-03-19 17:04:02 -07001359 }
Giridhar Malavali6e980162010-03-19 17:03:58 -07001360 return ret;
1361}
1362
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;
	struct srb_ctx *sp_bsg;

	/*
	 * BSG timeout handler: locate the SRB that carries the timed-out
	 * bsg job among all request queues' outstanding commands and ask
	 * the ISP to abort it.  The job's result is set according to the
	 * abort outcome; returns 0 in all cases.
	 */

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* Slot 0 is reserved; outstanding commands start at 1. */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				sp_bsg = sp->ctx;

				/* Only CT and host-ELS SRBs carry bsg jobs. */
				if (((sp_bsg->type == SRB_CT_CMD) ||
					(sp_bsg->type == SRB_ELS_CMD_HST))
					&& (sp_bsg->u.bsg_job == bsg_job)) {
					if (ha->isp_ops->abort_command(sp)) {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx "
						    "abort_command failed\n",
						    vha->host_no));
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx "
						    "abort_command success\n",
						    vha->host_no));
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					/* Found it: drop the lock and free. */
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld) SRB not found to abort\n", vha->host_no));
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/*
	 * CT pass-through allocates a transient fcport for the fabric
	 * target, so it is freed here; ELS jobs reuse an existing fcport.
	 * NOTE(review): sp is used after the lock is dropped — presumably
	 * safe because the command was just aborted; confirm ownership.
	 */
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}