/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);

/* bfad_im_bsg_get_kobject - increment the bfa refcnt */
static void
bfad_im_bsg_get_kobject(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	__module_get(shost->dma_dev->driver->owner);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/* bfad_im_bsg_put_kobject - decrement the bfa refcnt */
static void
bfad_im_bsg_put_kobject(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	module_put(shost->dma_dev->driver->owner);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

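/*
 * bfad_iocmd_ioc_get_info - IOCMD_IOC_GET_INFO handler: report the IOC
 * identity (WWNs, MAC addresses, serial number, adapter/port hardware
 * path and SCSI host number) back to the bsg caller.
 */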
static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int	i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s	*im_port;
	struct bfa_port_attr_s	pattr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	i = strlen(iocmd->adapter_hwpath) - 1;
	while (iocmd->adapter_hwpath[i] != '.')
		i--;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

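/*
 * bfad_iocmd_ioc_get_attr - IOCMD_IOC_GET_ATTR handler: return the IOC
 * attributes along with driver, firmware, option ROM version and PCI
 * attribute information.
 */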
static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

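/*
 * bfad_iocmd_port_get_attr - IOCMD_PORT_GET_ATTR handler: return the
 * physical port attributes, merged with the base lport's PID, port type,
 * loopback/authfail state and symbolic name.
 */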
static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

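/*
 * bfad_iocmd_lport_get_attr - IOCMD_LPORT_GET_ATTR handler: look up the
 * logical port by vf_id/pwwn and return its FCS attributes.
 */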
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

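/*
 * bfad_iocmd_rport_get_addr - IOCMD_RPORT_GET_ADDR handler: resolve a
 * remote port (by lport pwwn + rport pwwn) to the SCSI address seen by
 * the midlayer, i.e. the host/bus/target/lun tuple.
 */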
static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*fcs_itnim;
	struct bfad_itnim_s	*drv_itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

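/*
 * bfad_iocmd_fabric_get_lports - IOCMD_FABRIC_GET_LPORTS handler: copy
 * the WWNs of the logical ports on the given virtual fabric into the
 * variable-length payload area that follows the fixed command header.
 */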
static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t	*fcs_vf;
	uint32_t	nports = iocmd->nports;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
		sizeof(struct bfa_bsg_fabric_get_lports_s),
		sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

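/*
 * bfad_iocmd_itnim_get_attr - IOCMD_ITNIM_GET_ATTR handler: return the
 * initiator-target nexus attributes for the given lport/rport pwwn pair.
 */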
static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

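/*
 * bfad_iocmd_ioc_get_pcifn_cfg - IOCMD_IOC_PCIFN_CFG handler: query the
 * PCI function configuration through the adapter block (ablk) module and
 * wait for the asynchronous completion before reporting the status.
 */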
int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

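/*
 * bfad_iocmd_pcifn_create - IOCMD_PCIFN_CREATE handler: create a new PCI
 * function of the requested class and bandwidth on the given port and
 * wait for the asynchronous ablk completion.
 */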
int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

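/*
 * bfad_iocmd_pcifn_bw - IOCMD_PCIFN_BW handler: update the bandwidth
 * allocated to an existing PCI function and wait for the ablk completion.
 */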
int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

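/*
 * bfad_iocmd_adapter_cfg_mode - IOCMD_ADAPTER_CFG_MODE handler: change
 * the adapter operating mode and its max PF/VF limits through the ablk
 * module, waiting for the completion callback.
 */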
int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
				iocmd->instance, iocmd->cfg.mode,
				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

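/*
 * bfad_iocmd_ablk_optrom - IOCMD_FLASH_ENABLE_OPTROM /
 * IOCMD_FLASH_DISABLE_OPTROM handler: enable or disable the adapter
 * option ROM through the ablk module and wait for completion.
 */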
int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	else
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

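/*
 * bfad_iocmd_handler - dispatch a vendor-specific IOCMD to its handler.
 * Each handler returns 0 and reports the detailed status in the command
 * buffer; unknown commands are rejected with -EINVAL.
 */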
static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		unsigned int payload_len)
{
	int rc = EINVAL;

	switch (cmd) {
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	default:
		rc = EINVAL;
		break;
	}
	return -rc;
}

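/*
 * bfad_im_bsg_vendor_request - handle FC_BSG_HST_VENDOR requests: copy
 * the request payload into a linear kernel buffer, run the IOCMD handler
 * on it and copy the result back into the reply payload sg list.
 */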
static int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	void *payload_kbuf;
	int rc = -EINVAL;

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}

/* FC passthru call backs */
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
	struct bfa_sge_s	*sge;
	u64	addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
	struct bfa_sge_s	*sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
	struct bfa_sge_s	*sge;
	u64	addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
	struct bfa_sge_s	*sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}

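/*
 * bfad_fcxp_map_sg - allocate a single DMA-coherent buffer for the
 * passthru payload, copy the linear bsg buffer into it and build the
 * one-entry SG table that the fcxp SG callbacks above report from.
 */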
struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info	*buf_base, *buf_info;
	struct bfa_sge_s	*sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			   sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
					    &buf_info->phys, GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memset(buf_info->virt, 0, buf_info->size);
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/*
	 * Setup SG table
	 */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}

void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
					buf_info->size, buf_info->virt,
					buf_info->phys);
		}
		kfree(buf_base);
	}
}

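/*
 * bfad_fcxp_bsg_send - allocate a bfa_fcxp for the passthru request and
 * hand it to the BFA layer; bfad_send_fcpt_cb() completes drv_fcxp->comp
 * when the exchange finishes.
 */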
int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s	*bfad = drv_fcxp->port->bfad;
	unsigned long	flags;
	uint8_t	lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
				  drv_fcxp->num_req_sgles,
				  drv_fcxp->num_rsp_sgles,
				  bfad_fcxp_get_req_sgaddr_cb,
				  bfad_fcxp_get_req_sglen_cb,
				  bfad_fcxp_get_rsp_sgaddr_cb,
				  bfad_fcxp_get_rsp_sglen_cb);
	if (!hal_fcxp) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}

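/*
 * bfad_im_bsg_els_ct_request - handle ELS/CT passthru bsg requests: copy
 * the user payload, resolve the lport (and rport for RPT commands), map
 * the request/response buffers for DMA, send the FC exchange and wait
 * for its completion, then copy the response and status back to the
 * caller.
 */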
int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp    *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
					    sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = (struct bfa_bsg_fcpt_s *)
		   kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt)
		goto out;

	if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
				bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
					bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == 0)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else {	/* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
				FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
			 bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}

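/*
 * bfad_im_bsg_request - top-level bsg request handler: take a reference
 * on the driver module for the duration of the request and dispatch
 * vendor or ELS/CT passthru commands to the handlers above.
 */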
int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	/* Increment the bfa module refcnt - if bsg request is in service */
	bfad_im_bsg_get_kobject(job);

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	/* Decrement the bfa module refcnt - on completion of bsg request */
	bfad_im_bsg_put_kobject(job);

	return rc;
}

int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset bsg job timeout : for ELS/CT pass thru we
	 * already have timer to track the request.
	 */
	return -EAGAIN;
}