/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);

/* bfad_im_bsg_get_kobject - increment the bfa refcnt */
static void
bfad_im_bsg_get_kobject(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	__module_get(shost->dma_dev->driver->owner);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/* bfad_im_bsg_put_kobject - decrement the bfa refcnt */
static void
bfad_im_bsg_put_kobject(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	module_put(shost->dma_dev->driver->owner);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

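/* bfad_iocmd_ioc_get_info - return adapter identity (wwns, mac, serial, hw path) */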
static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s *im_port;
	struct bfa_port_attr_s pattr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	i = strlen(iocmd->adapter_hwpath) - 1;
	while (iocmd->adapter_hwpath[i] != '.')
		i--;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

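/* bfad_iocmd_ioc_get_attr - return IOC attributes plus driver and pci info */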
static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
	       sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
	       sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

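/* bfad_iocmd_port_get_attr - return physical and logical port attributes */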
static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

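/* bfad_iocmd_lport_get_attr - return attributes of the lport matching pwwn */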
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				       iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

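/* bfad_iocmd_rport_get_addr - map a remote port wwn to its SCSI address */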
static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *fcs_itnim;
	struct bfad_itnim_s *drv_itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				       iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port) {
		iocmd->host = drv_itnim->im_port->shost->host_no;
	} else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

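/* bfad_iocmd_fabric_get_lports - list lport wwns of the given virtual fabric */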
static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			     unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t *fcs_vf;
	uint32_t nports = iocmd->nports;
	unsigned long flags;
	void *iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_fabric_get_lports_s),
			sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

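/* bfad_iocmd_itnim_get_attr - return initiator-target nexus (itnim) attributes */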
static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				       iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

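/* bfad_iocmd_handler - dispatch a vendor-specific bsg command to its handler */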
static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		   unsigned int payload_len)
{
	int rc = EINVAL;

	switch (cmd) {
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	default:
		rc = EINVAL;
		break;
	}
	return -rc;
}

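/* bfad_im_bsg_vendor_request - service an FC_BSG_HST_VENDOR request */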
static int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	void *payload_kbuf;
	int rc = -EINVAL;

	/* Allocate a temp buffer to hold the passed-in userspace command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}

/* FC passthru callbacks */
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		  bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		  struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}

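/* bfad_fcxp_map_sg - copy a linear buffer into dma memory and build its SG table */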
struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			    sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
					    &buf_info->phys, GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memset(buf_info->virt, 0, buf_info->size);
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/*
	 * Setup SG table
	 */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}

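/* bfad_fcxp_free_mem - release the dma buffers allocated by bfad_fcxp_map_sg() */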
void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
						  buf_info->size,
						  buf_info->virt,
						  buf_info->phys);
		}
		kfree(buf_base);
	}
}

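/* bfad_fcxp_bsg_send - allocate a bfa_fcxp and issue the FC passthru request */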
int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
				  drv_fcxp->num_req_sgles,
				  drv_fcxp->num_rsp_sgles,
				  bfad_fcxp_get_req_sgaddr_cb,
				  bfad_fcxp_get_req_sglen_cb,
				  bfad_fcxp_get_rsp_sgaddr_cb,
				  bfad_fcxp_get_rsp_sglen_cb);
	if (!hal_fcxp) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}

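/* bfad_im_bsg_els_ct_request - service BSG ELS/CT passthru requests */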
int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* reply_len is at least sizeof(uint32_t) */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
					    sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = (struct bfa_bsg_fcpt_s *)
		   kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt)
		goto out;

	if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
			   bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
				       bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == 0)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else {	/* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			     (sizeof(struct bfad_buf_info) *
			      drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			     (sizeof(struct bfad_buf_info) *
			      drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
					sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
					FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
			 bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}

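/* bfad_im_bsg_request - bsg request entry point called by the FC transport */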
int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	/* Increment the bfa module refcnt while the bsg request is in service */
	bfad_im_bsg_get_kobject(job);

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	/* Decrement the bfa module refcnt on completion of the bsg request */
	bfad_im_bsg_put_kobject(job);

	return rc;
}

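/* bfad_im_bsg_timeout - bsg job timeout handler */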
int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset the bsg job timeout; for ELS/CT pass-through we
	 * already have a timer to track the request.
	 */
	return -EAGAIN;
}