/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                 *
 * Copyright (C) 2009-2011 Emulex.  All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                         *
 * www.emulex.com                                                   *
 *                                                                  *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
        struct list_head node;
        struct kref kref;
        wait_queue_head_t wq;

        /* Event type and waiter identifiers */
        uint32_t type_mask;
        uint32_t req_id;
        uint32_t reg_id;

        /* next two flags are here for the auto-delete logic */
        unsigned long wait_time_stamp;
        int waiting;

        /* seen and not seen events */
        struct list_head events_to_get;
        struct list_head events_to_see;

        /* job waiting for this event to finish */
        struct fc_bsg_job *set_job;
};

struct lpfc_bsg_iocb {
        struct lpfc_iocbq *cmdiocbq;
        struct lpfc_iocbq *rspiocbq;
        struct lpfc_dmabuf *bmp;
        struct lpfc_nodelist *ndlp;

        /* job waiting for this iocb to finish */
        struct fc_bsg_job *set_job;
};

struct lpfc_bsg_mbox {
        LPFC_MBOXQ_t *pmboxq;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
        uint8_t *ext; /* extended mailbox data */
        uint32_t mbOffset; /* from app */
        uint32_t inExtWLen; /* from app */
        uint32_t outExtWLen; /* from app */

        /* job waiting for this mbox command to finish */
        struct fc_bsg_job *set_job;
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
        struct lpfc_iocbq *cmdiocbq;
        struct lpfc_iocbq *rspiocbq;
        struct lpfc_dmabuf *bmp;

        /* job waiting for this iocb to finish */
        struct fc_bsg_job *set_job;
};

#define TYPE_EVT        1
#define TYPE_IOCB       2
#define TYPE_MBOX       3
#define TYPE_MENLO      4
struct bsg_job_data {
        uint32_t type;
        union {
                struct lpfc_bsg_event *evt;
                struct lpfc_bsg_iocb iocb;
                struct lpfc_bsg_mbox mbox;
                struct lpfc_bsg_menlo menlo;
        } context_un;
};

struct event_data {
        struct list_head node;
        uint32_t type;
        uint32_t immed_dat;
        void *data;
        uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
        ELX_LOOPBACK_XRI_SETUP,
        ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
        (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
        struct lpfc_dmabuf dma;
        uint32_t size;
        uint32_t flag;
};
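
/*
 * Job tracking: each asynchronous bsg request below allocates a
 * struct bsg_job_data and stores it in an iocb context pointer (or in
 * job->dd_data for event registrations).  The completion handlers use
 * it to locate the waiting fc_bsg_job, copy back status and clean up.
 */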

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        struct bsg_job_data *dd_data;
        struct fc_bsg_job *job;
        IOCB_t *rsp;
        struct lpfc_dmabuf *bmp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_bsg_iocb *iocb;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        dd_data = cmdiocbq->context2;
        if (!dd_data) {
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
                lpfc_sli_release_iocbq(phba, cmdiocbq);
                return;
        }

        iocb = &dd_data->context_un.iocb;
        job = iocb->set_job;
        job->dd_data = NULL; /* so timeout handler does not reply */

        bmp = iocb->bmp;
        rsp = &rspiocbq->iocb;
        ndlp = cmdiocbq->context1;

        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
        pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

        if (rsp->ulpStatus) {
                if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
                        switch (rsp->un.ulpWord[4] & 0xff) {
                        case IOERR_SEQUENCE_TIMEOUT:
                                rc = -ETIMEDOUT;
                                break;
                        case IOERR_INVALID_RPI:
                                rc = -EFAULT;
                                break;
                        default:
                                rc = -EACCES;
                                break;
                        }
                } else
                        rc = -EACCES;
        } else
                job->reply->reply_payload_rcv_len =
                        rsp->un.genreq64.bdl.bdeSize;

        lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
        lpfc_sli_release_iocbq(phba, cmdiocbq);
        lpfc_nlp_put(ndlp);
        kfree(bmp);
        kfree(dd_data);
        /* make error code available to userspace */
        job->reply->result = rc;
        /* complete the job back to userspace */
        job->job_done(job);
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        return;
}
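
/*
 * Note: when the driver runs the FCP ring in polled mode (cfg_poll has
 * DISABLE_FCP_RING_INT set), the send paths below re-enable the FCP
 * ring host interrupt via the HC register before issuing the iocb.
 */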

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
        struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_rport_data *rdata = job->rport->dd_data;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct ulp_bde64 *bpl = NULL;
        uint32_t timeout;
        struct lpfc_iocbq *cmdiocbq = NULL;
        IOCB_t *cmd;
        struct lpfc_dmabuf *bmp = NULL;
        int request_nseg;
        int reply_nseg;
        struct scatterlist *sgel = NULL;
        int numbde;
        dma_addr_t busaddr;
        struct bsg_job_data *dd_data;
        uint32_t creg_val;
        int rc = 0;
        int iocb_stat;

        /* in case no data is transferred */
        job->reply->reply_payload_rcv_len = 0;

        /* allocate our bsg tracking structure */
        dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
        if (!dd_data) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2733 Failed allocation of dd_data\n");
                rc = -ENOMEM;
                goto no_dd_data;
        }

        if (!lpfc_nlp_get(ndlp)) {
                rc = -ENODEV;
                goto no_ndlp;
        }

        bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (!bmp) {
                rc = -ENOMEM;
                goto free_ndlp;
        }

        if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
                rc = -ENODEV;
                goto free_bmp;
        }

        cmdiocbq = lpfc_sli_get_iocbq(phba);
        if (!cmdiocbq) {
                rc = -ENOMEM;
                goto free_bmp;
        }

        cmd = &cmdiocbq->iocb;
        bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
        if (!bmp->virt) {
                rc = -ENOMEM;
                goto free_cmdiocbq;
        }

        INIT_LIST_HEAD(&bmp->list);
        bpl = (struct ulp_bde64 *) bmp->virt;
        request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
                                  job->request_payload.sg_cnt, DMA_TO_DEVICE);
        for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
                busaddr = sg_dma_address(sgel);
                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                bpl->tus.w = cpu_to_le32(bpl->tus.w);
                bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
                bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
                bpl++;
        }

        reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
                                job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
                busaddr = sg_dma_address(sgel);
                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                bpl->tus.w = cpu_to_le32(bpl->tus.w);
                bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
                bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
                bpl++;
        }

        cmd->un.genreq64.bdl.ulpIoTag32 = 0;
        cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
        cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
        cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
        cmd->un.genreq64.bdl.bdeSize =
                (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
        cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
        cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
        cmd->un.genreq64.w5.hcsw.Dfctl = 0;
        cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
        cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
        cmd->ulpBdeCount = 1;
        cmd->ulpLe = 1;
        cmd->ulpClass = CLASS3;
        cmd->ulpContext = ndlp->nlp_rpi;
        if (phba->sli_rev == LPFC_SLI_REV4)
                cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
        cmd->ulpOwner = OWN_CHIP;
        cmdiocbq->vport = phba->pport;
        cmdiocbq->context3 = bmp;
        cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
        timeout = phba->fc_ratov * 2;
        cmd->ulpTimeout = timeout;

        cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
        cmdiocbq->context1 = ndlp;
        cmdiocbq->context2 = dd_data;
        dd_data->type = TYPE_IOCB;
        dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
        dd_data->context_un.iocb.set_job = job;
        dd_data->context_un.iocb.bmp = bmp;

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                if (lpfc_readl(phba->HCregaddr, &creg_val)) {
                        rc = -EIO;
                        goto free_cmdiocbq;
                }
                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
        if (iocb_stat == IOCB_SUCCESS)
                return 0; /* done for now */
        else if (iocb_stat == IOCB_BUSY)
                rc = -EAGAIN;
        else
                rc = -EIO;

        /* iocb failed so cleanup */
        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
        pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

        lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_cmdiocbq:
        lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
        kfree(bmp);
free_ndlp:
        lpfc_nlp_put(ndlp);
no_ndlp:
        kfree(dd_data);
no_dd_data:
        /* make error code available to userspace */
        job->reply->result = rc;
        job->dd_data = NULL;
        return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        struct bsg_job_data *dd_data;
        struct fc_bsg_job *job;
        IOCB_t *rsp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_dmabuf *pbuflist = NULL;
        struct fc_bsg_ctels_reply *els_reply;
        uint8_t *rjt_data;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        dd_data = cmdiocbq->context1;
        /* normal completion and timeout crossed paths, already done */
        if (!dd_data) {
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
                return;
        }

        cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
        if (cmdiocbq->context2 && rspiocbq)
                memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
                       &rspiocbq->iocb, sizeof(IOCB_t));

        job = dd_data->context_un.iocb.set_job;
        cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
        rspiocbq = dd_data->context_un.iocb.rspiocbq;
        rsp = &rspiocbq->iocb;
        ndlp = dd_data->context_un.iocb.ndlp;

        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
        pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

        if (job->reply->result == -EAGAIN)
                rc = -EAGAIN;
        else if (rsp->ulpStatus == IOSTAT_SUCCESS)
                job->reply->reply_payload_rcv_len =
                        rsp->un.elsreq64.bdl.bdeSize;
        else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
                job->reply->reply_payload_rcv_len =
                        sizeof(struct fc_bsg_ctels_reply);
                /* LS_RJT data returned in word 4 */
                rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
                els_reply = &job->reply->reply_data.ctels_reply;
                els_reply->status = FC_CTELS_STATUS_REJECT;
                els_reply->rjt_data.action = rjt_data[3];
                els_reply->rjt_data.reason_code = rjt_data[2];
                els_reply->rjt_data.reason_explanation = rjt_data[1];
                els_reply->rjt_data.vendor_unique = rjt_data[0];
        } else
                rc = -EIO;

        pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
        lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
        lpfc_sli_release_iocbq(phba, rspiocbq);
        lpfc_sli_release_iocbq(phba, cmdiocbq);
        lpfc_nlp_put(ndlp);
        kfree(dd_data);
        /* make error code available to userspace */
        job->reply->result = rc;
        job->dd_data = NULL;
        /* complete the job back to userspace */
        job->job_done(job);
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
        struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_rport_data *rdata = job->rport->dd_data;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        uint32_t elscmd;
        uint32_t cmdsize;
        uint32_t rspsize;
        struct lpfc_iocbq *rspiocbq;
        struct lpfc_iocbq *cmdiocbq;
        IOCB_t *rsp;
        uint16_t rpi = 0;
        struct lpfc_dmabuf *pcmd;
        struct lpfc_dmabuf *prsp;
        struct lpfc_dmabuf *pbuflist = NULL;
        struct ulp_bde64 *bpl;
        int request_nseg;
        int reply_nseg;
        struct scatterlist *sgel = NULL;
        int numbde;
        dma_addr_t busaddr;
        struct bsg_job_data *dd_data;
        uint32_t creg_val;
        int rc = 0;

        /* in case no data is transferred */
        job->reply->reply_payload_rcv_len = 0;

        /* allocate our bsg tracking structure */
        dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
        if (!dd_data) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2735 Failed allocation of dd_data\n");
                rc = -ENOMEM;
                goto no_dd_data;
        }

        if (!lpfc_nlp_get(ndlp)) {
                rc = -ENODEV;
                goto free_dd_data;
        }

        elscmd = job->request->rqst_data.r_els.els_code;
        cmdsize = job->request_payload.payload_len;
        rspsize = job->reply_payload.payload_len;
        rspiocbq = lpfc_sli_get_iocbq(phba);
        if (!rspiocbq) {
                lpfc_nlp_put(ndlp);
                rc = -ENOMEM;
                goto free_dd_data;
        }

        rsp = &rspiocbq->iocb;
        rpi = ndlp->nlp_rpi;

        cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
                                      ndlp->nlp_DID, elscmd);
        if (!cmdiocbq) {
                rc = -EIO;
                goto free_rspiocbq;
        }

        /* prep els iocb set context1 to the ndlp, context2 to the command
         * dmabuf, context3 holds the data dmabuf
         */
        pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
        prsp = (struct lpfc_dmabuf *) pcmd->list.next;
        lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
        kfree(pcmd);
        lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
        kfree(prsp);
        cmdiocbq->context2 = NULL;

        pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
        bpl = (struct ulp_bde64 *) pbuflist->virt;

        request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
                                  job->request_payload.sg_cnt, DMA_TO_DEVICE);
        for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
                busaddr = sg_dma_address(sgel);
                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                bpl->tus.w = cpu_to_le32(bpl->tus.w);
                bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
                bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
                bpl++;
        }

        reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
                                job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
                busaddr = sg_dma_address(sgel);
                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                bpl->tus.w = cpu_to_le32(bpl->tus.w);
                bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
                bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
                bpl++;
        }
        cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
                (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
        cmdiocbq->iocb.ulpContext = rpi;
        cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
        cmdiocbq->context1 = NULL;
        cmdiocbq->context2 = NULL;

        cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
        cmdiocbq->context1 = dd_data;
        cmdiocbq->context2 = rspiocbq;
        dd_data->type = TYPE_IOCB;
        dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
        dd_data->context_un.iocb.rspiocbq = rspiocbq;
        dd_data->context_un.iocb.set_job = job;
        dd_data->context_un.iocb.bmp = NULL;
        dd_data->context_un.iocb.ndlp = ndlp;

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                if (lpfc_readl(phba->HCregaddr, &creg_val)) {
                        rc = -EIO;
                        goto linkdown_err;
                }
                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
        lpfc_nlp_put(ndlp);
        if (rc == IOCB_SUCCESS)
                return 0; /* done for now */
        else if (rc == IOCB_BUSY)
                rc = -EAGAIN;
        else
                rc = -EIO;

linkdown_err:
        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
        pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

        lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);

        lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rspiocbq:
        lpfc_sli_release_iocbq(phba, rspiocbq);

free_dd_data:
        kfree(dd_data);

no_dd_data:
        /* make error code available to userspace */
        job->reply->result = rc;
        job->dd_data = NULL;
        return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
        struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
                                                  kref);
        struct event_data *ed;

        list_del(&evt->node);

        while (!list_empty(&evt->events_to_get)) {
                ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
                list_del(&ed->node);
                kfree(ed->data);
                kfree(ed);
        }

        while (!list_empty(&evt->events_to_see)) {
                ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
                list_del(&ed->node);
                kfree(ed->data);
                kfree(ed);
        }

        kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
        kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
        kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
        struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

        if (!evt)
                return NULL;

        INIT_LIST_HEAD(&evt->events_to_get);
        INIT_LIST_HEAD(&evt->events_to_see);
        evt->type_mask = ev_mask;
        evt->req_id = ev_req_id;
        evt->reg_id = ev_reg_id;
        evt->wait_time_stamp = jiffies;
        init_waitqueue_head(&evt->wq);
        kref_init(&evt->kref);
        return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
        struct lpfc_dmabufext *mlast;
        struct pci_dev *pcidev;
        struct list_head head, *curr, *next;

        if ((!mlist) || (!lpfc_is_link_up(phba) &&
                (phba->link_flag & LS_LOOPBACK_MODE))) {
                return 0;
        }

        pcidev = phba->pcidev;
        list_add_tail(&head, &mlist->dma.list);

        list_for_each_safe(curr, next, &head) {
                mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
                if (mlast->dma.virt)
                        dma_free_coherent(&pcidev->dev,
                                          mlast->size,
                                          mlast->dma.virt,
                                          mlast->dma.phys);
                kfree(mlast);
        }
        return 0;
}
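
/*
 * Unsolicited CT handling: the handler below copies each received CT
 * frame into an event_data entry on the registered waiter's
 * events_to_see/events_to_get lists.  Loopback frames wake the waiter
 * directly; for anything else a pending SET_EVENT job, if one is
 * waiting, is completed with the received data length.
 */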

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @piocbq: Pointer to the unsolicited command iocb.
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocbq)
{
        uint32_t evt_req_id = 0;
        uint32_t cmd;
        uint32_t len;
        struct lpfc_dmabuf *dmabuf = NULL;
        struct lpfc_bsg_event *evt;
        struct event_data *evt_dat = NULL;
        struct lpfc_iocbq *iocbq;
        size_t offset = 0;
        struct list_head head;
        struct ulp_bde64 *bde;
        dma_addr_t dma_addr;
        int i;
        struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
        struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
        struct lpfc_hbq_entry *hbqe;
        struct lpfc_sli_ct_request *ct_req;
        struct fc_bsg_job *job = NULL;
        unsigned long flags;
        int size = 0;

        INIT_LIST_HEAD(&head);
        list_add_tail(&head, &piocbq->list);

        if (piocbq->iocb.ulpBdeCount == 0 ||
            piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
                goto error_ct_unsol_exit;

        if (phba->link_state == LPFC_HBA_ERROR ||
                (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
                goto error_ct_unsol_exit;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
                dmabuf = bdeBuf1;
        else {
                dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
                                    piocbq->iocb.un.cont64[0].addrLow);
                dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
        }
        if (dmabuf == NULL)
                goto error_ct_unsol_exit;
        ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
        evt_req_id = ct_req->FsType;
        cmd = ct_req->CommandResponse.bits.CmdRsp;
        len = ct_req->CommandResponse.bits.Size;
        if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
                lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
                if (!(evt->type_mask & FC_REG_CT_EVENT) ||
                        evt->req_id != evt_req_id)
                        continue;

                lpfc_bsg_event_ref(evt);
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
                evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
                if (evt_dat == NULL) {
                        spin_lock_irqsave(&phba->ct_ev_lock, flags);
                        lpfc_bsg_event_unref(evt);
                        lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                        "2614 Memory allocation failed for "
                                        "CT event\n");
                        break;
                }

                if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                        /* take accumulated byte count from the last iocbq */
                        iocbq = list_entry(head.prev, typeof(*iocbq), list);
                        evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
                } else {
                        list_for_each_entry(iocbq, &head, list) {
                                for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
                                        evt_dat->len +=
                                        iocbq->iocb.un.cont64[i].tus.f.bdeSize;
                        }
                }

                evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
                if (evt_dat->data == NULL) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                        "2615 Memory allocation failed for "
                                        "CT event data, size %d\n",
                                        evt_dat->len);
                        kfree(evt_dat);
                        spin_lock_irqsave(&phba->ct_ev_lock, flags);
                        lpfc_bsg_event_unref(evt);
                        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
                        goto error_ct_unsol_exit;
                }

                list_for_each_entry(iocbq, &head, list) {
                        size = 0;
                        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                                bdeBuf1 = iocbq->context2;
                                bdeBuf2 = iocbq->context3;
                        }
                        for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
                                if (phba->sli3_options &
                                    LPFC_SLI3_HBQ_ENABLED) {
                                        if (i == 0) {
                                                hbqe = (struct lpfc_hbq_entry *)
                                                  &iocbq->iocb.un.ulpWord[0];
                                                size = hbqe->bde.tus.f.bdeSize;
                                                dmabuf = bdeBuf1;
                                        } else if (i == 1) {
                                                hbqe = (struct lpfc_hbq_entry *)
                                                        &iocbq->iocb.unsli3.
                                                        sli3Words[4];
                                                size = hbqe->bde.tus.f.bdeSize;
                                                dmabuf = bdeBuf2;
                                        }
                                        if ((offset + size) > evt_dat->len)
                                                size = evt_dat->len - offset;
                                } else {
                                        size = iocbq->iocb.un.cont64[i].
                                                tus.f.bdeSize;
                                        bde = &iocbq->iocb.un.cont64[i];
                                        dma_addr = getPaddr(bde->addrHigh,
                                                            bde->addrLow);
                                        dmabuf = lpfc_sli_ringpostbuf_get(phba,
                                                        pring, dma_addr);
                                }
                                if (!dmabuf) {
                                        lpfc_printf_log(phba, KERN_ERR,
                                                LOG_LIBDFC, "2616 No dmabuf "
                                                "found for iocbq 0x%p\n",
                                                iocbq);
                                        kfree(evt_dat->data);
                                        kfree(evt_dat);
                                        spin_lock_irqsave(&phba->ct_ev_lock,
                                                flags);
                                        lpfc_bsg_event_unref(evt);
                                        spin_unlock_irqrestore(
                                                &phba->ct_ev_lock, flags);
                                        goto error_ct_unsol_exit;
                                }
                                memcpy((char *)(evt_dat->data) + offset,
                                       dmabuf->virt, size);
                                offset += size;
                                if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
                                    !(phba->sli3_options &
                                      LPFC_SLI3_HBQ_ENABLED)) {
                                        lpfc_sli_ringpostbuf_put(phba, pring,
                                                                 dmabuf);
                                } else {
                                        switch (cmd) {
                                        case ELX_LOOPBACK_DATA:
                                                diag_cmd_data_free(phba,
                                                (struct lpfc_dmabufext *)
                                                        dmabuf);
                                                break;
                                        case ELX_LOOPBACK_XRI_SETUP:
                                                if ((phba->sli_rev ==
                                                        LPFC_SLI_REV2) ||
                                                        (phba->sli3_options &
                                                        LPFC_SLI3_HBQ_ENABLED
                                                        )) {
                                                        lpfc_in_buf_free(phba,
                                                                        dmabuf);
                                                } else {
                                                        lpfc_post_buffer(phba,
                                                                         pring,
                                                                         1);
                                                }
                                                break;
                                        default:
                                                if (!(phba->sli3_options &
                                                      LPFC_SLI3_HBQ_ENABLED))
                                                        lpfc_post_buffer(phba,
                                                                         pring,
                                                                         1);
                                                break;
                                        }
                                }
                        }
                }

                spin_lock_irqsave(&phba->ct_ev_lock, flags);
                if (phba->sli_rev == LPFC_SLI_REV4) {
                        evt_dat->immed_dat = phba->ctx_idx;
                        phba->ctx_idx = (phba->ctx_idx + 1) % 64;
                        /* Provide warning for over-run of the ct_ctx array */
                        if (phba->ct_ctx[evt_dat->immed_dat].flags &
                            UNSOL_VALID)
                                lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
                                                "2717 CT context array entry "
                                                "[%d] over-run: oxid:x%x, "
                                                "sid:x%x\n", phba->ctx_idx,
                                                phba->ct_ctx[
                                                    evt_dat->immed_dat].oxid,
                                                phba->ct_ctx[
                                                    evt_dat->immed_dat].SID);
                        phba->ct_ctx[evt_dat->immed_dat].rxid =
                                piocbq->iocb.ulpContext;
                        phba->ct_ctx[evt_dat->immed_dat].oxid =
                                piocbq->iocb.unsli3.rcvsli3.ox_id;
                        phba->ct_ctx[evt_dat->immed_dat].SID =
                                piocbq->iocb.un.rcvels.remoteID;
                        phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
                } else
                        evt_dat->immed_dat = piocbq->iocb.ulpContext;

                evt_dat->type = FC_REG_CT_EVENT;
                list_add(&evt_dat->node, &evt->events_to_see);
                if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
                        wake_up_interruptible(&evt->wq);
                        lpfc_bsg_event_unref(evt);
                        break;
                }

                list_move(evt->events_to_see.prev, &evt->events_to_get);
                lpfc_bsg_event_unref(evt);

                job = evt->set_job;
                evt->set_job = NULL;
                if (job) {
                        job->reply->reply_payload_rcv_len = size;
                        /* make error code available to userspace */
                        job->reply->result = 0;
                        job->dd_data = NULL;
                        /* complete the job back to userspace */
                        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
                        job->job_done(job);
                        spin_lock_irqsave(&phba->ct_ev_lock, flags);
                }
        }
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
        if (!list_empty(&head))
                list_del(&head);
        if (evt_req_id == SLI_CT_ELX_LOOPBACK)
                return 0;
        return 1;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
        struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct set_ct_event *event_req;
        struct lpfc_bsg_event *evt;
        int rc = 0;
        struct bsg_job_data *dd_data = NULL;
        uint32_t ev_mask;
        unsigned long flags;

        if (job->request_len <
            sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2612 Received SET_CT_EVENT below minimum "
                                "size\n");
                rc = -EINVAL;
                goto job_error;
        }

        dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
        if (dd_data == NULL) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2734 Failed allocation of dd_data\n");
                rc = -ENOMEM;
                goto job_error;
        }

        event_req = (struct set_ct_event *)
                job->request->rqst_data.h_vendor.vendor_cmd;
        ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
                                FC_REG_EVENT_MASK);
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
                if (evt->reg_id == event_req->ev_reg_id) {
                        lpfc_bsg_event_ref(evt);
                        evt->wait_time_stamp = jiffies;
                        break;
                }
        }
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

        if (&evt->node == &phba->ct_ev_waiters) {
                /* no event waiting struct yet - first call */
                evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
                                        event_req->ev_req_id);
                if (!evt) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                        "2617 Failed allocation of event "
                                        "waiter\n");
                        rc = -ENOMEM;
                        goto job_error;
                }

                spin_lock_irqsave(&phba->ct_ev_lock, flags);
                list_add(&evt->node, &phba->ct_ev_waiters);
                lpfc_bsg_event_ref(evt);
                evt->wait_time_stamp = jiffies;
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        }

        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        evt->waiting = 1;
        dd_data->type = TYPE_EVT;
        dd_data->context_un.evt = evt;
        evt->set_job = job; /* for unsolicited command */
        job->dd_data = dd_data; /* for fc transport timeout callback */
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        return 0; /* call job done later */

job_error:
        if (dd_data != NULL)
                kfree(dd_data);

        job->dd_data = NULL;
        return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
        struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct get_ct_event *event_req;
        struct get_ct_event_reply *event_reply;
        struct lpfc_bsg_event *evt;
        struct event_data *evt_dat = NULL;
        unsigned long flags;
        uint32_t rc = 0;

        if (job->request_len <
            sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2613 Received GET_CT_EVENT request below "
                                "minimum size\n");
                rc = -EINVAL;
                goto job_error;
        }

        event_req = (struct get_ct_event *)
                job->request->rqst_data.h_vendor.vendor_cmd;

        event_reply = (struct get_ct_event_reply *)
                job->reply->reply_data.vendor_reply.vendor_rsp;
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
                if (evt->reg_id == event_req->ev_reg_id) {
                        if (list_empty(&evt->events_to_get))
                                break;
                        lpfc_bsg_event_ref(evt);
                        evt->wait_time_stamp = jiffies;
                        evt_dat = list_entry(evt->events_to_get.prev,
                                             struct event_data, node);
                        list_del(&evt_dat->node);
                        break;
                }
        }
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

        /* The app may continue to ask for event data until it gets
         * an error indicating that there isn't anymore
         */
        if (evt_dat == NULL) {
                job->reply->reply_payload_rcv_len = 0;
                rc = -ENOENT;
                goto job_error;
        }

        if (evt_dat->len > job->request_payload.payload_len) {
                evt_dat->len = job->request_payload.payload_len;
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2618 Truncated event data at %d "
                                "bytes\n",
                                job->request_payload.payload_len);
        }

        event_reply->type = evt_dat->type;
        event_reply->immed_data = evt_dat->immed_dat;
        if (evt_dat->len > 0)
                job->reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(job->request_payload.sg_list,
                                            job->request_payload.sg_cnt,
                                            evt_dat->data, evt_dat->len);
        else
                job->reply->reply_payload_rcv_len = 0;

        if (evt_dat) {
                kfree(evt_dat->data);
                kfree(evt_dat);
        }

        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        lpfc_bsg_event_unref(evt);
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        job->dd_data = NULL;
        job->reply->result = 0;
        job->job_done(job);
        return 0;

job_error:
        job->dd_data = NULL;
        job->reply->result = rc;
        return rc;
}
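
/*
 * CT response path: lpfc_bsg_send_mgmt_rsp below maps the user payload
 * and calls lpfc_issue_ct_rsp, which (on SLI4) looks up the exchange
 * saved in phba->ct_ctx[tag] by lpfc_bsg_ct_unsol_event and transmits
 * the response sequence on it.
 */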

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        struct bsg_job_data *dd_data;
        struct fc_bsg_job *job;
        IOCB_t *rsp;
        struct lpfc_dmabuf *bmp;
        struct lpfc_nodelist *ndlp;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        dd_data = cmdiocbq->context2;
        /* normal completion and timeout crossed paths, already done */
        if (!dd_data) {
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
                return;
        }

        job = dd_data->context_un.iocb.set_job;
        bmp = dd_data->context_un.iocb.bmp;
        rsp = &rspiocbq->iocb;
        ndlp = dd_data->context_un.iocb.ndlp;

        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (rsp->ulpStatus) {
                if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
                        switch (rsp->un.ulpWord[4] & 0xff) {
                        case IOERR_SEQUENCE_TIMEOUT:
                                rc = -ETIMEDOUT;
                                break;
                        case IOERR_INVALID_RPI:
                                rc = -EFAULT;
                                break;
                        default:
                                rc = -EACCES;
                                break;
                        }
                } else
                        rc = -EACCES;
        } else
                job->reply->reply_payload_rcv_len =
                        rsp->un.genreq64.bdl.bdeSize;

        lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
        lpfc_sli_release_iocbq(phba, cmdiocbq);
        lpfc_nlp_put(ndlp);
        kfree(bmp);
        kfree(dd_data);
        /* make error code available to userspace */
        job->reply->result = rc;
        job->dd_data = NULL;
        /* complete the job back to userspace */
        job->job_done(job);
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the port's context exchange array.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
                  struct lpfc_dmabuf *bmp, int num_entry)
{
        IOCB_t *icmd;
        struct lpfc_iocbq *ctiocb = NULL;
        int rc = 0;
        struct lpfc_nodelist *ndlp = NULL;
        struct bsg_job_data *dd_data;
        uint32_t creg_val;

        /* allocate our bsg tracking structure */
        dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
        if (!dd_data) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2736 Failed allocation of dd_data\n");
                rc = -ENOMEM;
                goto no_dd_data;
        }

        /* Allocate buffer for command iocb */
        ctiocb = lpfc_sli_get_iocbq(phba);
        if (!ctiocb) {
                rc = -ENOMEM;
                goto no_ctiocb;
        }

        icmd = &ctiocb->iocb;
        icmd->un.xseq64.bdl.ulpIoTag32 = 0;
        icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
        icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
        icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
        icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
        icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
        icmd->un.xseq64.w5.hcsw.Dfctl = 0;
        icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
        icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

        /* Fill in rest of iocb */
        icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
        icmd->ulpBdeCount = 1;
        icmd->ulpLe = 1;
        icmd->ulpClass = CLASS3;
        if (phba->sli_rev == LPFC_SLI_REV4) {
                /* Do not issue unsol response if oxid not marked as valid */
                if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
                        rc = IOCB_ERROR;
                        goto issue_ct_rsp_exit;
                }
                icmd->ulpContext = phba->ct_ctx[tag].rxid;
                icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
                ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
                if (!ndlp) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
                                 "2721 ndlp null for oxid %x SID %x\n",
                                        icmd->ulpContext,
                                        phba->ct_ctx[tag].SID);
                        rc = IOCB_ERROR;
                        goto issue_ct_rsp_exit;
                }

                /* Check if the ndlp is active */
                if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                        rc = -IOCB_ERROR;
                        goto issue_ct_rsp_exit;
                }

                /* get a reference count so the ndlp doesn't go away while
                 * we respond
                 */
                if (!lpfc_nlp_get(ndlp)) {
                        rc = -IOCB_ERROR;
                        goto issue_ct_rsp_exit;
                }

                icmd->un.ulpWord[3] =
                                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

                /* The exchange is done, mark the entry as invalid */
                phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
        } else
                icmd->ulpContext = (ushort) tag;

        icmd->ulpTimeout = phba->fc_ratov * 2;

        /* Xmit CT response on exchange <xid> */
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
                icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

        ctiocb->iocb_cmpl = NULL;
        ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
        ctiocb->vport = phba->pport;
        ctiocb->context3 = bmp;

        ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
        ctiocb->context2 = dd_data;
        ctiocb->context1 = ndlp;
        dd_data->type = TYPE_IOCB;
        dd_data->context_un.iocb.cmdiocbq = ctiocb;
        dd_data->context_un.iocb.rspiocbq = NULL;
        dd_data->context_un.iocb.set_job = job;
        dd_data->context_un.iocb.bmp = bmp;
        dd_data->context_un.iocb.ndlp = ndlp;

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                if (lpfc_readl(phba->HCregaddr, &creg_val)) {
                        rc = -IOCB_ERROR;
                        goto issue_ct_rsp_exit;
                }
                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

        if (rc == IOCB_SUCCESS)
                return 0; /* done for now */

issue_ct_rsp_exit:
        lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
        kfree(dd_data);
no_dd_data:
        return rc;
}
1395
1396/**
1397 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1398 * @job: SEND_MGMT_RESP fc_bsg_job
1399 **/
1400static int
1401lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1402{
1403 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1404 struct lpfc_hba *phba = vport->phba;
1405 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1406 job->request->rqst_data.h_vendor.vendor_cmd;
1407 struct ulp_bde64 *bpl;
1408 struct lpfc_dmabuf *bmp = NULL;
1409 struct scatterlist *sgel = NULL;
1410 int request_nseg;
1411 int numbde;
1412 dma_addr_t busaddr;
1413 uint32_t tag = mgmt_resp->tag;
1414 unsigned long reqbfrcnt =
1415 (unsigned long)job->request_payload.payload_len;
1416 int rc = 0;
1417
1418 /* in case no data is transferred */
1419 job->reply->reply_payload_rcv_len = 0;
1420
1421 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1422 rc = -ERANGE;
1423 goto send_mgmt_rsp_exit;
1424 }
1425
1426 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1427 if (!bmp) {
1428 rc = -ENOMEM;
1429 goto send_mgmt_rsp_exit;
1430 }
1431
1432 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1433 if (!bmp->virt) {
1434 rc = -ENOMEM;
1435 goto send_mgmt_rsp_free_bmp;
1436 }
1437
1438 INIT_LIST_HEAD(&bmp->list);
1439 bpl = (struct ulp_bde64 *) bmp->virt;
1440 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1441 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1442 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1443 busaddr = sg_dma_address(sgel);
1444 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1445 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1446 bpl->tus.w = cpu_to_le32(bpl->tus.w);
1447 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1448 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1449 bpl++;
1450 }
1451
1452 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1453
1454 if (rc == IOCB_SUCCESS)
1455 return 0; /* done for now */
1456
1457 /* TBD need to handle a timeout */
1458 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1459 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1460 rc = -EACCES;
1461 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1462
1463send_mgmt_rsp_free_bmp:
1464 kfree(bmp);
1465send_mgmt_rsp_exit:
1466 /* make error code available to userspace */
1467 job->reply->result = rc;
1468 job->dd_data = NULL;
1469 return rc;
1470}
1471
1472/**
James Smart7ad20aa2011-05-24 11:44:28 -04001473 * lpfc_bsg_diag_mode_enter - prepare the device for diag loopback mode
1474 * @phba: Pointer to HBA context object.
James Smart3b5dd522010-01-26 23:10:15 -05001475 *
James Smart7ad20aa2011-05-24 11:44:28 -04001476 * This function prepares the driver for diag loopback on the device by
1477 * blocking new scsi requests and waiting for outstanding I/O to complete.
1478 */
1479static int
James Smart88a2cfb2011-07-22 18:36:33 -04001480lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
James Smart7ad20aa2011-05-24 11:44:28 -04001481{
1482 struct lpfc_vport **vports;
1483 struct Scsi_Host *shost;
1484 struct lpfc_sli *psli;
1485 struct lpfc_sli_ring *pring;
1486 int i = 0;
1487
1488 psli = &phba->sli;
1489 if (!psli)
1490 return -ENODEV;
1491
1492 pring = &psli->ring[LPFC_FCP_RING];
1493 if (!pring)
1494 return -ENODEV;
1495
1496 if ((phba->link_state == LPFC_HBA_ERROR) ||
1497 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1498 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1499 return -EACCES;
1500
1501 vports = lpfc_create_vport_work_array(phba);
1502 if (vports) {
1503 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1504 shost = lpfc_shost_from_vport(vports[i]);
1505 scsi_block_requests(shost);
1506 }
1507 lpfc_destroy_vport_work_array(phba, vports);
1508 } else {
1509 shost = lpfc_shost_from_vport(phba->pport);
1510 scsi_block_requests(shost);
1511 }
1512
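	/* drain FCP commands still outstanding on the ring before entering
	 * diag mode
	 */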
1513 while (pring->txcmplq_cnt) {
1514 if (i++ > 500) /* wait up to 5 seconds */
1515 break;
1516 msleep(10);
1517 }
1518 return 0;
1519}
1520
1521/**
1522 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1523 * @phba: Pointer to HBA context object.
James Smart7ad20aa2011-05-24 11:44:28 -04001524 *
1525 * This function handles driver exit processing from device diag loopback
1526 * mode by unblocking scsi requests on all vports.
1527 */
1528static void
1529lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1530{
1531 struct Scsi_Host *shost;
1532 struct lpfc_vport **vports;
1533 int i;
1534
1535 vports = lpfc_create_vport_work_array(phba);
1536 if (vports) {
1537 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1538 shost = lpfc_shost_from_vport(vports[i]);
1539 scsi_unblock_requests(shost);
1540 }
1541 lpfc_destroy_vport_work_array(phba, vports);
1542 } else {
1543 shost = lpfc_shost_from_vport(phba->pport);
1544 scsi_unblock_requests(shost);
1545 }
1546 return;
1547}
1548
1549/**
1550 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1551 * @phba: Pointer to HBA context object.
1552 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1553 *
1554 * This function is responsible for placing an sli3 port into diagnostic
1555 * loopback mode in order to perform a diagnostic loopback test.
James Smart3b5dd522010-01-26 23:10:15 -05001556 * All new scsi requests are blocked; a small delay is used to allow the
1557 * scsi requests to complete, then the link is brought down. Once the link
1558 * is placed in loopback mode, scsi requests are again allowed
1559 * so the scsi mid-layer doesn't give up on the port.
1560 * All of this is done in-line.
1561 */
1562static int
James Smart7ad20aa2011-05-24 11:44:28 -04001563lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
James Smart3b5dd522010-01-26 23:10:15 -05001564{
James Smart3b5dd522010-01-26 23:10:15 -05001565 struct diag_mode_set *loopback_mode;
James Smart3b5dd522010-01-26 23:10:15 -05001566 uint32_t link_flags;
1567 uint32_t timeout;
James Smart3b5dd522010-01-26 23:10:15 -05001568 LPFC_MBOXQ_t *pmboxq;
1569 int mbxstatus;
1570 int i = 0;
1571 int rc = 0;
1572
1573 /* no data to return just the return code */
1574 job->reply->reply_payload_rcv_len = 0;
1575
James Smart7ad20aa2011-05-24 11:44:28 -04001576 if (job->request_len < sizeof(struct fc_bsg_request) +
1577 sizeof(struct diag_mode_set)) {
James Smart3b5dd522010-01-26 23:10:15 -05001578 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
James Smart7ad20aa2011-05-24 11:44:28 -04001579 "2738 Received DIAG MODE request size:%d "
1580 "below the minimum size:%d\n",
1581 job->request_len,
1582 (int)(sizeof(struct fc_bsg_request) +
1583 sizeof(struct diag_mode_set)));
James Smart3b5dd522010-01-26 23:10:15 -05001584 rc = -EINVAL;
1585 goto job_error;
1586 }
1587
James Smart88a2cfb2011-07-22 18:36:33 -04001588 rc = lpfc_bsg_diag_mode_enter(phba);
James Smart7ad20aa2011-05-24 11:44:28 -04001589 if (rc)
1590 goto job_error;
1591
1592 /* bring the link to diagnostic mode */
James Smart3b5dd522010-01-26 23:10:15 -05001593 loopback_mode = (struct diag_mode_set *)
1594 job->request->rqst_data.h_vendor.vendor_cmd;
1595 link_flags = loopback_mode->type;
James Smart515e0aa2010-09-29 11:19:00 -04001596 timeout = loopback_mode->timeout * 100;
James Smart3b5dd522010-01-26 23:10:15 -05001597
James Smart3b5dd522010-01-26 23:10:15 -05001598 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1599 if (!pmboxq) {
1600 rc = -ENOMEM;
James Smart7ad20aa2011-05-24 11:44:28 -04001601 goto loopback_mode_exit;
James Smart3b5dd522010-01-26 23:10:15 -05001602 }
James Smart3b5dd522010-01-26 23:10:15 -05001603 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1604 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1605 pmboxq->u.mb.mbxOwner = OWN_HOST;
1606
1607 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1608
1609 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1610 /* wait for link down before proceeding */
1611 i = 0;
1612 while (phba->link_state != LPFC_LINK_DOWN) {
1613 if (i++ > timeout) {
1614 rc = -ETIMEDOUT;
1615 goto loopback_mode_exit;
1616 }
1617
1618 msleep(10);
1619 }
1620
1621 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1622 if (link_flags == INTERNAL_LOOP_BACK)
1623 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1624 else
1625 pmboxq->u.mb.un.varInitLnk.link_flags =
1626 FLAGS_TOPOLOGY_MODE_LOOP;
1627
1628 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1629 pmboxq->u.mb.mbxOwner = OWN_HOST;
1630
1631 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1632 LPFC_MBOX_TMO);
1633
1634 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1635 rc = -ENODEV;
1636 else {
1637 phba->link_flag |= LS_LOOPBACK_MODE;
1638 /* wait for the link attention interrupt */
1639 msleep(100);
1640
1641 i = 0;
1642 while (phba->link_state != LPFC_HBA_READY) {
1643 if (i++ > timeout) {
1644 rc = -ETIMEDOUT;
1645 break;
1646 }
1647
1648 msleep(10);
1649 }
1650 }
1651
1652 } else
1653 rc = -ENODEV;
1654
1655loopback_mode_exit:
James Smart7ad20aa2011-05-24 11:44:28 -04001656 lpfc_bsg_diag_mode_exit(phba);
James Smart3b5dd522010-01-26 23:10:15 -05001657
1658 /*
1659 * Let SLI layer release mboxq if mbox command completed after timeout.
1660 */
1661 if (mbxstatus != MBX_TIMEOUT)
1662 mempool_free(pmboxq, phba->mbox_mem_pool);
1663
1664job_error:
1665 /* make error code available to userspace */
1666 job->reply->result = rc;
1667 /* complete the job back to userspace if no error */
1668 if (rc == 0)
1669 job->job_done(job);
1670 return rc;
1671}
1672
1673/**
James Smart7ad20aa2011-05-24 11:44:28 -04001674 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1675 * @phba: Pointer to HBA context object.
1676 * @diag: Flag for setting link to diag or normal operation state.
1677 *
1678 * This function is responsible for issuing a sli4 mailbox command for setting
1679 * link to either diag state or normal operation state.
1680 */
1681static int
1682lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1683{
1684 LPFC_MBOXQ_t *pmboxq;
1685 struct lpfc_mbx_set_link_diag_state *link_diag_state;
1686 uint32_t req_len, alloc_len;
1687 int mbxstatus = MBX_SUCCESS, rc;
1688
1689 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1690 if (!pmboxq)
1691 return -ENOMEM;
1692
1693 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1694 sizeof(struct lpfc_sli4_cfg_mhdr));
1695 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1696 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1697 req_len, LPFC_SLI4_MBX_EMBED);
1698 if (alloc_len != req_len) {
1699 rc = -ENOMEM;
1700 goto link_diag_state_set_out;
1701 }
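	/* fill the link number, link type, and requested diag state into the
	 * embedded SLI_CONFIG mailbox payload
	 */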
1702 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1703 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1704 phba->sli4_hba.link_state.number);
1705 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1706 phba->sli4_hba.link_state.type);
1707 if (diag)
1708 bf_set(lpfc_mbx_set_diag_state_diag,
1709 &link_diag_state->u.req, 1);
1710 else
1711 bf_set(lpfc_mbx_set_diag_state_diag,
1712 &link_diag_state->u.req, 0);
1713
1714 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1715
1716 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1717 rc = 0;
1718 else
1719 rc = -ENODEV;
1720
1721link_diag_state_set_out:
1722 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1723 mempool_free(pmboxq, phba->mbox_mem_pool);
1724
1725 return rc;
1726}
1727
1728/**
1729 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1730 * @phba: Pointer to HBA context object.
1731 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1732 *
1733 * This function is responsible for placing an sli4 port into diagnostic
1734 * loopback mode in order to perform a diagnostic loopback test.
1735 */
1736static int
1737lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1738{
1739 struct diag_mode_set *loopback_mode;
1740 uint32_t link_flags, timeout, req_len, alloc_len;
1741 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1742 LPFC_MBOXQ_t *pmboxq = NULL;
1743 int mbxstatus, i, rc = 0;
1744
1745 /* no data to return just the return code */
1746 job->reply->reply_payload_rcv_len = 0;
1747
1748 if (job->request_len < sizeof(struct fc_bsg_request) +
1749 sizeof(struct diag_mode_set)) {
1750 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1751 "3011 Received DIAG MODE request size:%d "
1752 "below the minimum size:%d\n",
1753 job->request_len,
1754 (int)(sizeof(struct fc_bsg_request) +
1755 sizeof(struct diag_mode_set)));
1756 rc = -EINVAL;
1757 goto job_error;
1758 }
1759
James Smart88a2cfb2011-07-22 18:36:33 -04001760 rc = lpfc_bsg_diag_mode_enter(phba);
James Smart7ad20aa2011-05-24 11:44:28 -04001761 if (rc)
1762 goto job_error;
1763
1764 /* bring the link to diagnostic mode */
1765 loopback_mode = (struct diag_mode_set *)
1766 job->request->rqst_data.h_vendor.vendor_cmd;
1767 link_flags = loopback_mode->type;
1768 timeout = loopback_mode->timeout * 100;
1769
1770 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1771 if (rc)
1772 goto loopback_mode_exit;
1773
1774 /* wait for link down before proceeding */
1775 i = 0;
1776 while (phba->link_state != LPFC_LINK_DOWN) {
1777 if (i++ > timeout) {
1778 rc = -ETIMEDOUT;
1779 goto loopback_mode_exit;
1780 }
1781 msleep(10);
1782 }
1783 /* set up loopback mode */
1784 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1785 if (!pmboxq) {
1786 rc = -ENOMEM;
1787 goto loopback_mode_exit;
1788 }
1789 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1790 sizeof(struct lpfc_sli4_cfg_mhdr));
1791 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1792 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1793 req_len, LPFC_SLI4_MBX_EMBED);
1794 if (alloc_len != req_len) {
1795 rc = -ENOMEM;
1796 goto loopback_mode_exit;
1797 }
1798 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1799 bf_set(lpfc_mbx_set_diag_state_link_num,
1800 &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
1801 bf_set(lpfc_mbx_set_diag_state_link_type,
1802 &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
1803 if (link_flags == INTERNAL_LOOP_BACK)
1804 bf_set(lpfc_mbx_set_diag_lpbk_type,
1805 &link_diag_loopback->u.req,
1806 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1807 else
1808 bf_set(lpfc_mbx_set_diag_lpbk_type,
1809 &link_diag_loopback->u.req,
1810 LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
1811
1812 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1813 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1814 rc = -ENODEV;
1815 else {
1816 phba->link_flag |= LS_LOOPBACK_MODE;
1817 /* wait for the link attention interrupt */
1818 msleep(100);
1819 i = 0;
1820 while (phba->link_state != LPFC_HBA_READY) {
1821 if (i++ > timeout) {
1822 rc = -ETIMEDOUT;
1823 break;
1824 }
1825 msleep(10);
1826 }
1827 }
1828
1829loopback_mode_exit:
1830 lpfc_bsg_diag_mode_exit(phba);
1831
1832 /*
1833 * Let SLI layer release mboxq if mbox command completed after timeout.
1834 */
1835 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1836 mempool_free(pmboxq, phba->mbox_mem_pool);
1837
1838job_error:
1839 /* make error code available to userspace */
1840 job->reply->result = rc;
1841 /* complete the job back to userspace if no error */
1842 if (rc == 0)
1843 job->job_done(job);
1844 return rc;
1845}
1846
1847/**
1848 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
1849 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1850 *
1851 * This function is responsible for checking and dispatching the bsg diag
1852 * command from the user to the proper driver action routine.
1853 */
1854static int
1855lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
1856{
1857 struct Scsi_Host *shost;
1858 struct lpfc_vport *vport;
1859 struct lpfc_hba *phba;
1860 int rc;
1861
1862 shost = job->shost;
1863 if (!shost)
1864 return -ENODEV;
1865 vport = (struct lpfc_vport *)job->shost->hostdata;
1866 if (!vport)
1867 return -ENODEV;
1868 phba = vport->phba;
1869 if (!phba)
1870 return -ENODEV;
1871
1872 if (phba->sli_rev < LPFC_SLI_REV4)
1873 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
1874 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1875 LPFC_SLI_INTF_IF_TYPE_2)
1876 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
1877 else
1878 rc = -ENODEV;
1879
1880 return rc;
1881
1882}
1883
1884/**
1885 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
1886 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
1887 *
1888 * This function is responsible for checking and dispatching the bsg diag
1889 * command from the user to the proper driver action routine.
1890 */
1891static int
1892lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
1893{
1894 struct Scsi_Host *shost;
1895 struct lpfc_vport *vport;
1896 struct lpfc_hba *phba;
1897 int rc;
1898
1899 shost = job->shost;
1900 if (!shost)
1901 return -ENODEV;
1902 vport = (struct lpfc_vport *)job->shost->hostdata;
1903 if (!vport)
1904 return -ENODEV;
1905 phba = vport->phba;
1906 if (!phba)
1907 return -ENODEV;
1908
1909 if (phba->sli_rev < LPFC_SLI_REV4)
1910 return -ENODEV;
1911 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1912 LPFC_SLI_INTF_IF_TYPE_2)
1913 return -ENODEV;
1914
1915 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
1916
1917 if (!rc)
1918 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1919
1920 return rc;
1921}
1922
1923/**
1924 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
1925 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
1926 *
1927 * This function performs an SLI4 diag link test request from the user
1928 * application.
1929 */
1930static int
1931lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
1932{
1933 struct Scsi_Host *shost;
1934 struct lpfc_vport *vport;
1935 struct lpfc_hba *phba;
1936 LPFC_MBOXQ_t *pmboxq;
1937 struct sli4_link_diag *link_diag_test_cmd;
1938 uint32_t req_len, alloc_len;
1939 uint32_t timeout;
1940 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
1941 union lpfc_sli4_cfg_shdr *shdr;
1942 uint32_t shdr_status, shdr_add_status;
1943 struct diag_status *diag_status_reply;
1944 int mbxstatus, rc = 0;
1945
1946 shost = job->shost;
1947 if (!shost) {
1948 rc = -ENODEV;
1949 goto job_error;
1950 }
1951 vport = (struct lpfc_vport *)job->shost->hostdata;
1952 if (!vport) {
1953 rc = -ENODEV;
1954 goto job_error;
1955 }
1956 phba = vport->phba;
1957 if (!phba) {
1958 rc = -ENODEV;
1959 goto job_error;
1960 }
1961
1962 if (phba->sli_rev < LPFC_SLI_REV4) {
1963 rc = -ENODEV;
1964 goto job_error;
1965 }
1966 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1967 LPFC_SLI_INTF_IF_TYPE_2) {
1968 rc = -ENODEV;
1969 goto job_error;
1970 }
1971
1972 if (job->request_len < sizeof(struct fc_bsg_request) +
1973 sizeof(struct sli4_link_diag)) {
1974 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1975 "3013 Received LINK DIAG TEST request "
1976 " size:%d below the minimum size:%d\n",
1977 job->request_len,
1978 (int)(sizeof(struct fc_bsg_request) +
1979 sizeof(struct sli4_link_diag)));
1980 rc = -EINVAL;
1981 goto job_error;
1982 }
1983
James Smart88a2cfb2011-07-22 18:36:33 -04001984 rc = lpfc_bsg_diag_mode_enter(phba);
James Smart7ad20aa2011-05-24 11:44:28 -04001985 if (rc)
1986 goto job_error;
1987
1988 link_diag_test_cmd = (struct sli4_link_diag *)
1989 job->request->rqst_data.h_vendor.vendor_cmd;
1990 timeout = link_diag_test_cmd->timeout * 100;
1991
1992 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1993
1994 if (rc)
1995 goto job_error;
1996
1997 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1998 if (!pmboxq) {
1999 rc = -ENOMEM;
2000 goto link_diag_test_exit;
2001 }
2002
2003 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2004 sizeof(struct lpfc_sli4_cfg_mhdr));
2005 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2006 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2007 req_len, LPFC_SLI4_MBX_EMBED);
2008 if (alloc_len != req_len) {
2009 rc = -ENOMEM;
2010 goto link_diag_test_exit;
2011 }
2012 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2013 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2014 phba->sli4_hba.link_state.number);
2015 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2016 phba->sli4_hba.link_state.type);
2017 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2018 link_diag_test_cmd->test_id);
2019 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2020 link_diag_test_cmd->loops);
2021 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2022 link_diag_test_cmd->test_version);
2023 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2024 link_diag_test_cmd->error_action);
2025
2026 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2027
2028 shdr = (union lpfc_sli4_cfg_shdr *)
2029 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2030 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2031 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2032 if (shdr_status || shdr_add_status || mbxstatus) {
2033 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2034 "3010 Run link diag test mailbox failed with "
2035 "mbx_status x%x status x%x, add_status x%x\n",
2036 mbxstatus, shdr_status, shdr_add_status);
2037 }
2038
2039 diag_status_reply = (struct diag_status *)
2040 job->reply->reply_data.vendor_reply.vendor_rsp;
2041
2042 if (job->reply_len <
2043 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2044 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2045 "3012 Received Run link diag test reply "
2046 "below minimum size (%d): reply_len:%d\n",
2047 (int)(sizeof(struct fc_bsg_request) +
2048 sizeof(struct diag_status)),
2049 job->reply_len);
2050 rc = -EINVAL;
2051 goto job_error;
2052 }
2053
2054 diag_status_reply->mbox_status = mbxstatus;
2055 diag_status_reply->shdr_status = shdr_status;
2056 diag_status_reply->shdr_add_status = shdr_add_status;
2057
2058link_diag_test_exit:
2059 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2060
2061 if (pmboxq)
2062 mempool_free(pmboxq, phba->mbox_mem_pool);
2063
2064 lpfc_bsg_diag_mode_exit(phba);
2065
2066job_error:
2067 /* make error code available to userspace */
2068 job->reply->result = rc;
2069 /* complete the job back to userspace if no error */
2070 if (rc == 0)
2071 job->job_done(job);
2072 return rc;
2073}
2074
2075/**
James Smart3b5dd522010-01-26 23:10:15 -05002076 * lpfcdiag_loop_self_reg - obtains a remote port login id
2077 * @phba: Pointer to HBA context object
2078 * @rpi: Pointer to a remote port login id
2079 *
2080 * This function obtains a remote port login id so the diag loopback test
2081 * can send and receive its own unsolicited CT command.
2082 **/
James Smart40426292010-12-15 17:58:10 -05002083static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
James Smart3b5dd522010-01-26 23:10:15 -05002084{
2085 LPFC_MBOXQ_t *mbox;
2086 struct lpfc_dmabuf *dmabuff;
2087 int status;
2088
2089 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2090 if (!mbox)
James Smartd439d282010-09-29 11:18:45 -04002091 return -ENOMEM;
James Smart3b5dd522010-01-26 23:10:15 -05002092
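	/* SLI4 ports allocate an rpi from the driver-managed pool up front;
	 * on SLI3 the rpi is returned by the REG_LOGIN mailbox completion below
	 */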
James Smart40426292010-12-15 17:58:10 -05002093 if (phba->sli_rev == LPFC_SLI_REV4)
2094 *rpi = lpfc_sli4_alloc_rpi(phba);
James Smart3b5dd522010-01-26 23:10:15 -05002095 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
James Smart40426292010-12-15 17:58:10 -05002096 (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
James Smart3b5dd522010-01-26 23:10:15 -05002097 if (status) {
2098 mempool_free(mbox, phba->mbox_mem_pool);
James Smart40426292010-12-15 17:58:10 -05002099 if (phba->sli_rev == LPFC_SLI_REV4)
2100 lpfc_sli4_free_rpi(phba, *rpi);
James Smartd439d282010-09-29 11:18:45 -04002101 return -ENOMEM;
James Smart3b5dd522010-01-26 23:10:15 -05002102 }
2103
2104 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
2105 mbox->context1 = NULL;
James Smartd439d282010-09-29 11:18:45 -04002106 mbox->context2 = NULL;
James Smart3b5dd522010-01-26 23:10:15 -05002107 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2108
2109 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2110 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2111 kfree(dmabuff);
2112 if (status != MBX_TIMEOUT)
2113 mempool_free(mbox, phba->mbox_mem_pool);
James Smart40426292010-12-15 17:58:10 -05002114 if (phba->sli_rev == LPFC_SLI_REV4)
2115 lpfc_sli4_free_rpi(phba, *rpi);
James Smartd439d282010-09-29 11:18:45 -04002116 return -ENODEV;
James Smart3b5dd522010-01-26 23:10:15 -05002117 }
2118
2119 *rpi = mbox->u.mb.un.varWords[0];
2120
2121 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2122 kfree(dmabuff);
2123 mempool_free(mbox, phba->mbox_mem_pool);
2124 return 0;
2125}
2126
2127/**
2128 * lpfcdiag_loop_self_unreg - unregs from the rpi
2129 * @phba: Pointer to HBA context object
2130 * @rpi: Remote port login id
2131 *
2132 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2133 **/
2134static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2135{
2136 LPFC_MBOXQ_t *mbox;
2137 int status;
2138
2139 /* Allocate mboxq structure */
2140 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2141 if (mbox == NULL)
James Smartd439d282010-09-29 11:18:45 -04002142 return -ENOMEM;
James Smart3b5dd522010-01-26 23:10:15 -05002143
2144 lpfc_unreg_login(phba, 0, rpi, mbox);
2145 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2146
2147 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2148 if (status != MBX_TIMEOUT)
2149 mempool_free(mbox, phba->mbox_mem_pool);
James Smartd439d282010-09-29 11:18:45 -04002150 return -EIO;
James Smart3b5dd522010-01-26 23:10:15 -05002151 }
James Smart3b5dd522010-01-26 23:10:15 -05002152 mempool_free(mbox, phba->mbox_mem_pool);
James Smart40426292010-12-15 17:58:10 -05002153 if (phba->sli_rev == LPFC_SLI_REV4)
2154 lpfc_sli4_free_rpi(phba, rpi);
James Smart3b5dd522010-01-26 23:10:15 -05002155 return 0;
2156}
2157
2158/**
2159 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2160 * @phba: Pointer to HBA context object
2161 * @rpi: Remote port login id
2162 * @txxri: Pointer to transmit exchange id
2163 * @rxxri: Pointer to receive exchange id
2164 *
2165 * This function obtains the transmit and receive ids required to send
2166 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2167 * flags are used so the unsolicited response handler is able to process
2168 * the ct command sent on the same port.
2169 **/
2170static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2171 uint16_t *txxri, uint16_t * rxxri)
2172{
2173 struct lpfc_bsg_event *evt;
2174 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2175 IOCB_t *cmd, *rsp;
2176 struct lpfc_dmabuf *dmabuf;
2177 struct ulp_bde64 *bpl = NULL;
2178 struct lpfc_sli_ct_request *ctreq = NULL;
2179 int ret_val = 0;
James Smartd439d282010-09-29 11:18:45 -04002180 int time_left;
James Smart515e0aa2010-09-29 11:19:00 -04002181 int iocb_stat = 0;
James Smart3b5dd522010-01-26 23:10:15 -05002182 unsigned long flags;
2183
2184 *txxri = 0;
2185 *rxxri = 0;
2186 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2187 SLI_CT_ELX_LOOPBACK);
2188 if (!evt)
James Smartd439d282010-09-29 11:18:45 -04002189 return -ENOMEM;
James Smart3b5dd522010-01-26 23:10:15 -05002190
2191 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2192 list_add(&evt->node, &phba->ct_ev_waiters);
2193 lpfc_bsg_event_ref(evt);
2194 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2195
2196 cmdiocbq = lpfc_sli_get_iocbq(phba);
2197 rspiocbq = lpfc_sli_get_iocbq(phba);
2198
2199 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2200 if (dmabuf) {
2201 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
James Smartc7495932010-04-06 15:05:28 -04002202 if (dmabuf->virt) {
2203 INIT_LIST_HEAD(&dmabuf->list);
2204 bpl = (struct ulp_bde64 *) dmabuf->virt;
2205 memset(bpl, 0, sizeof(*bpl));
2206 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2207 bpl->addrHigh =
2208 le32_to_cpu(putPaddrHigh(dmabuf->phys +
2209 sizeof(*bpl)));
2210 bpl->addrLow =
2211 le32_to_cpu(putPaddrLow(dmabuf->phys +
2212 sizeof(*bpl)));
2213 bpl->tus.f.bdeFlags = 0;
2214 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2215 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2216 }
James Smart3b5dd522010-01-26 23:10:15 -05002217 }
2218
2219 if (cmdiocbq == NULL || rspiocbq == NULL ||
James Smartc7495932010-04-06 15:05:28 -04002220 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2221 dmabuf->virt == NULL) {
James Smartd439d282010-09-29 11:18:45 -04002222 ret_val = -ENOMEM;
James Smart3b5dd522010-01-26 23:10:15 -05002223 goto err_get_xri_exit;
2224 }
2225
2226 cmd = &cmdiocbq->iocb;
2227 rsp = &rspiocbq->iocb;
2228
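	/* build an ELX loopback XRI setup CT request; the unsolicited event
	 * handler on this same port hands back the receive exchange id
	 */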
2229 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2230
2231 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2232 ctreq->RevisionId.bits.InId = 0;
2233 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2234 ctreq->FsSubType = 0;
2235 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2236 ctreq->CommandResponse.bits.Size = 0;
2237
2238
2239 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2240 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2241 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2242 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2243
2244 cmd->un.xseq64.w5.hcsw.Fctl = LA;
2245 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2246 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2247 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2248
2249 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2250 cmd->ulpBdeCount = 1;
2251 cmd->ulpLe = 1;
2252 cmd->ulpClass = CLASS3;
2253 cmd->ulpContext = rpi;
2254
2255 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2256 cmdiocbq->vport = phba->pport;
2257
James Smartd439d282010-09-29 11:18:45 -04002258 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
James Smart3b5dd522010-01-26 23:10:15 -05002259 rspiocbq,
2260 (phba->fc_ratov * 2)
2261 + LPFC_DRVR_TIMEOUT);
James Smartd439d282010-09-29 11:18:45 -04002262 if (iocb_stat) {
2263 ret_val = -EIO;
James Smart3b5dd522010-01-26 23:10:15 -05002264 goto err_get_xri_exit;
James Smartd439d282010-09-29 11:18:45 -04002265 }
James Smart3b5dd522010-01-26 23:10:15 -05002266 *txxri = rsp->ulpContext;
2267
2268 evt->waiting = 1;
2269 evt->wait_time_stamp = jiffies;
James Smartd439d282010-09-29 11:18:45 -04002270 time_left = wait_event_interruptible_timeout(
James Smart3b5dd522010-01-26 23:10:15 -05002271 evt->wq, !list_empty(&evt->events_to_see),
2272 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2273 if (list_empty(&evt->events_to_see))
James Smartd439d282010-09-29 11:18:45 -04002274 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
James Smart3b5dd522010-01-26 23:10:15 -05002275 else {
James Smart3b5dd522010-01-26 23:10:15 -05002276 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2277 list_move(evt->events_to_see.prev, &evt->events_to_get);
2278 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2279 *rxxri = (list_entry(evt->events_to_get.prev,
2280 typeof(struct event_data),
2281 node))->immed_dat;
2282 }
2283 evt->waiting = 0;
2284
2285err_get_xri_exit:
2286 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2287 lpfc_bsg_event_unref(evt); /* release ref */
2288 lpfc_bsg_event_unref(evt); /* delete */
2289 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2290
2291 if (dmabuf) {
2292 if (dmabuf->virt)
2293 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2294 kfree(dmabuf);
2295 }
2296
James Smartd439d282010-09-29 11:18:45 -04002297 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
James Smart3b5dd522010-01-26 23:10:15 -05002298 lpfc_sli_release_iocbq(phba, cmdiocbq);
2299 if (rspiocbq)
2300 lpfc_sli_release_iocbq(phba, rspiocbq);
2301 return ret_val;
2302}
2303
2304/**
James Smart7ad20aa2011-05-24 11:44:28 -04002305 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
2306 * @phba: Pointer to HBA context object
2307 *
2308 * This function allocates a BSG_MBOX_SIZE (4KB) page size dma buffer and
2309 * returns the pointer to the buffer.
2310 **/
2311static struct lpfc_dmabuf *
2312lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2313{
2314 struct lpfc_dmabuf *dmabuf;
2315 struct pci_dev *pcidev = phba->pcidev;
2316
2317 /* allocate dma buffer struct */
2318 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2319 if (!dmabuf)
2320 return NULL;
2321
2322 INIT_LIST_HEAD(&dmabuf->list);
2323
2324 /* now, allocate dma buffer */
2325 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2326 &(dmabuf->phys), GFP_KERNEL);
2327
2328 if (!dmabuf->virt) {
2329 kfree(dmabuf);
2330 return NULL;
2331 }
2332 memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2333
2334 return dmabuf;
2335}
2336
2337/**
2338 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2339 * @phba: Pointer to HBA context object.
2340 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2341 *
2342 * This routine just simply frees a dma buffer and its associated buffer
2343 * descriptor referred by @dmabuf.
2344 **/
2345static void
2346lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2347{
2348 struct pci_dev *pcidev = phba->pcidev;
2349
2350 if (!dmabuf)
2351 return;
2352
2353 if (dmabuf->virt)
2354 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2355 dmabuf->virt, dmabuf->phys);
2356 kfree(dmabuf);
2357 return;
2358}
2359
2360/**
2361 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2362 * @phba: Pointer to HBA context object.
2363 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2364 *
2365 * This routine just simply frees all dma buffers and their associated buffer
2366 * descriptors referred by @dmabuf_list.
2367 **/
2368static void
2369lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2370 struct list_head *dmabuf_list)
2371{
2372 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2373
2374 if (list_empty(dmabuf_list))
2375 return;
2376
2377 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2378 list_del_init(&dmabuf->list);
2379 lpfc_bsg_dma_page_free(phba, dmabuf);
2380 }
2381 return;
2382}
2383
2384/**
James Smart3b5dd522010-01-26 23:10:15 -05002385 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2386 * @phba: Pointer to HBA context object
2387 * @bpl: Pointer to 64 bit bde structure
2388 * @size: Number of bytes to process
2389 * @nocopydata: Flag indicating that user data is not to be copied into the buffers
2390 *
2391 * This function allocates page size buffers and populates an lpfc_dmabufext.
2392 * Unless @nocopydata is set, the buffers are zeroed so user data can later
2393 * be copied in. The chained list of page size buffers is returned.
2394 **/
2395static struct lpfc_dmabufext *
2396diag_cmd_data_alloc(struct lpfc_hba *phba,
2397 struct ulp_bde64 *bpl, uint32_t size,
2398 int nocopydata)
2399{
2400 struct lpfc_dmabufext *mlist = NULL;
2401 struct lpfc_dmabufext *dmp;
2402 int cnt, offset = 0, i = 0;
2403 struct pci_dev *pcidev;
2404
2405 pcidev = phba->pcidev;
2406
2407 while (size) {
2408 /* We get chunks of 4K */
2409 if (size > BUF_SZ_4K)
2410 cnt = BUF_SZ_4K;
2411 else
2412 cnt = size;
2413
2414 /* allocate struct lpfc_dmabufext buffer header */
2415 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2416 if (!dmp)
2417 goto out;
2418
2419 INIT_LIST_HEAD(&dmp->dma.list);
2420
2421 /* Queue it to a linked list */
2422 if (mlist)
2423 list_add_tail(&dmp->dma.list, &mlist->dma.list);
2424 else
2425 mlist = dmp;
2426
2427 /* allocate buffer */
2428 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2429 cnt,
2430 &(dmp->dma.phys),
2431 GFP_KERNEL);
2432
2433 if (!dmp->dma.virt)
2434 goto out;
2435
2436 dmp->size = cnt;
2437
2438 if (nocopydata) {
2439 bpl->tus.f.bdeFlags = 0;
2440 pci_dma_sync_single_for_device(phba->pcidev,
2441 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2442
2443 } else {
2444 memset((uint8_t *)dmp->dma.virt, 0, cnt);
2445 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2446 }
2447
2448 /* build buffer ptr list for IOCB */
2449 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2450 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2451 bpl->tus.f.bdeSize = (ushort) cnt;
2452 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2453 bpl++;
2454
2455 i++;
2456 offset += cnt;
2457 size -= cnt;
2458 }
2459
2460 mlist->flag = i;
2461 return mlist;
2462out:
2463 diag_cmd_data_free(phba, mlist);
2464 return NULL;
2465}
2466
2467/**
2468 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2469 * @phba: Pointer to HBA context object
2470 * @rxxri: Receive exchange id
2471 * @len: Number of data bytes
2472 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002473 * This function allocates and posts a data buffer of sufficient size to receive
James Smart3b5dd522010-01-26 23:10:15 -05002474 * an unsolicited CT command.
2475 **/
2476static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2477 size_t len)
2478{
2479 struct lpfc_sli *psli = &phba->sli;
2480 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2481 struct lpfc_iocbq *cmdiocbq;
2482 IOCB_t *cmd = NULL;
2483 struct list_head head, *curr, *next;
2484 struct lpfc_dmabuf *rxbmp;
2485 struct lpfc_dmabuf *dmp;
2486 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2487 struct ulp_bde64 *rxbpl = NULL;
2488 uint32_t num_bde;
2489 struct lpfc_dmabufext *rxbuffer = NULL;
2490 int ret_val = 0;
James Smartd439d282010-09-29 11:18:45 -04002491 int iocb_stat;
James Smart3b5dd522010-01-26 23:10:15 -05002492 int i = 0;
2493
2494 cmdiocbq = lpfc_sli_get_iocbq(phba);
2495 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2496 if (rxbmp != NULL) {
2497 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
James Smartc7495932010-04-06 15:05:28 -04002498 if (rxbmp->virt) {
2499 INIT_LIST_HEAD(&rxbmp->list);
2500 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2501 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2502 }
James Smart3b5dd522010-01-26 23:10:15 -05002503 }
2504
2505 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
James Smartd439d282010-09-29 11:18:45 -04002506 ret_val = -ENOMEM;
James Smart3b5dd522010-01-26 23:10:15 -05002507 goto err_post_rxbufs_exit;
2508 }
2509
2510 /* Queue buffers for the receive exchange */
2511 num_bde = (uint32_t)rxbuffer->flag;
2512 dmp = &rxbuffer->dma;
2513
2514 cmd = &cmdiocbq->iocb;
2515 i = 0;
2516
2517 INIT_LIST_HEAD(&head);
2518 list_add_tail(&head, &dmp->list);
2519 list_for_each_safe(curr, next, &head) {
2520 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2521 list_del(curr);
2522
2523 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2524 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2525 cmd->un.quexri64cx.buff.bde.addrHigh =
2526 putPaddrHigh(mp[i]->phys);
2527 cmd->un.quexri64cx.buff.bde.addrLow =
2528 putPaddrLow(mp[i]->phys);
2529 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2530 ((struct lpfc_dmabufext *)mp[i])->size;
2531 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2532 cmd->ulpCommand = CMD_QUE_XRI64_CX;
2533 cmd->ulpPU = 0;
2534 cmd->ulpLe = 1;
2535 cmd->ulpBdeCount = 1;
2536 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2537
2538 } else {
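			/* non-HBQ ports chain at most two receive buffers per
			 * CMD_QUE_XRI_BUF64_CX iocb
			 */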
2539 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2540 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2541 cmd->un.cont64[i].tus.f.bdeSize =
2542 ((struct lpfc_dmabufext *)mp[i])->size;
2543 cmd->ulpBdeCount = ++i;
2544
2545 if ((--num_bde > 0) && (i < 2))
2546 continue;
2547
2548 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2549 cmd->ulpLe = 1;
2550 }
2551
2552 cmd->ulpClass = CLASS3;
2553 cmd->ulpContext = rxxri;
2554
James Smartd439d282010-09-29 11:18:45 -04002555 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2556 0);
2557 if (iocb_stat == IOCB_ERROR) {
James Smart3b5dd522010-01-26 23:10:15 -05002558 diag_cmd_data_free(phba,
2559 (struct lpfc_dmabufext *)mp[0]);
2560 if (mp[1])
2561 diag_cmd_data_free(phba,
2562 (struct lpfc_dmabufext *)mp[1]);
2563 dmp = list_entry(next, struct lpfc_dmabuf, list);
James Smartd439d282010-09-29 11:18:45 -04002564 ret_val = -EIO;
James Smart3b5dd522010-01-26 23:10:15 -05002565 goto err_post_rxbufs_exit;
2566 }
2567
2568 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2569 if (mp[1]) {
2570 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2571 mp[1] = NULL;
2572 }
2573
2574 /* The iocb was freed by lpfc_sli_issue_iocb */
2575 cmdiocbq = lpfc_sli_get_iocbq(phba);
2576 if (!cmdiocbq) {
2577 dmp = list_entry(next, struct lpfc_dmabuf, list);
James Smartd439d282010-09-29 11:18:45 -04002578 ret_val = -EIO;
James Smart3b5dd522010-01-26 23:10:15 -05002579 goto err_post_rxbufs_exit;
2580 }
2581
2582 cmd = &cmdiocbq->iocb;
2583 i = 0;
2584 }
2585 list_del(&head);
2586
2587err_post_rxbufs_exit:
2588
2589 if (rxbmp) {
2590 if (rxbmp->virt)
2591 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2592 kfree(rxbmp);
2593 }
2594
2595 if (cmdiocbq)
2596 lpfc_sli_release_iocbq(phba, cmdiocbq);
2597 return ret_val;
2598}
2599
2600/**
James Smart7ad20aa2011-05-24 11:44:28 -04002601 * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
James Smart3b5dd522010-01-26 23:10:15 -05002602 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2603 *
2604 * This function receives a user data buffer to be transmitted and received on
2605 * the same port; the link must be up and in loopback mode prior
2606 * to being called.
2607 * 1. A kernel buffer is allocated to copy the user data into.
2608 * 2. The port registers with "itself".
2609 * 3. The transmit and receive exchange ids are obtained.
2610 * 4. The receive exchange id is posted.
2611 * 5. A new els loopback event is created.
2612 * 6. The command and response iocbs are allocated.
2613 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2614 *
2615 * This function is meant to be called n times while the port is in loopback
2616 * so it is the app's responsibility to issue a reset to take the port out
2617 * of loopback mode.
2618 **/
2619static int
James Smart7ad20aa2011-05-24 11:44:28 -04002620lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
James Smart3b5dd522010-01-26 23:10:15 -05002621{
2622 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2623 struct lpfc_hba *phba = vport->phba;
2624 struct diag_mode_test *diag_mode;
2625 struct lpfc_bsg_event *evt;
2626 struct event_data *evdat;
2627 struct lpfc_sli *psli = &phba->sli;
2628 uint32_t size;
2629 uint32_t full_size;
2630 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
James Smart40426292010-12-15 17:58:10 -05002631 uint16_t rpi = 0;
James Smart3b5dd522010-01-26 23:10:15 -05002632 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2633 IOCB_t *cmd, *rsp;
2634 struct lpfc_sli_ct_request *ctreq;
2635 struct lpfc_dmabuf *txbmp;
2636 struct ulp_bde64 *txbpl = NULL;
2637 struct lpfc_dmabufext *txbuffer = NULL;
2638 struct list_head head;
2639 struct lpfc_dmabuf *curr;
2640 uint16_t txxri, rxxri;
2641 uint32_t num_bde;
2642 uint8_t *ptr = NULL, *rx_databuf = NULL;
2643 int rc = 0;
James Smartd439d282010-09-29 11:18:45 -04002644 int time_left;
2645 int iocb_stat;
James Smart3b5dd522010-01-26 23:10:15 -05002646 unsigned long flags;
2647 void *dataout = NULL;
2648 uint32_t total_mem;
2649
2650 /* in case no data is returned return just the return code */
2651 job->reply->reply_payload_rcv_len = 0;
2652
2653 if (job->request_len <
2654 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2655 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2656 "2739 Received DIAG TEST request below minimum "
2657 "size\n");
2658 rc = -EINVAL;
2659 goto loopback_test_exit;
2660 }
2661
2662 if (job->request_payload.payload_len !=
2663 job->reply_payload.payload_len) {
2664 rc = -EINVAL;
2665 goto loopback_test_exit;
2666 }
2667
2668 diag_mode = (struct diag_mode_test *)
2669 job->request->rqst_data.h_vendor.vendor_cmd;
2670
2671 if ((phba->link_state == LPFC_HBA_ERROR) ||
2672 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2673 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2674 rc = -EACCES;
2675 goto loopback_test_exit;
2676 }
2677
2678 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2679 rc = -EACCES;
2680 goto loopback_test_exit;
2681 }
2682
2683 size = job->request_payload.payload_len;
2684 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2685
2686 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2687 rc = -ERANGE;
2688 goto loopback_test_exit;
2689 }
2690
James Smart63e801c2010-11-20 23:14:19 -05002691 if (full_size >= BUF_SZ_4K) {
James Smart3b5dd522010-01-26 23:10:15 -05002692 /*
2693 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2694 * then we allocate 64k and re-use that buffer over and over to
2695 * xfer the whole block. This is because Linux kernel has a
2696 * problem allocating more than 120k of kernel space memory. Saw
2697 * problem with GET_FCPTARGETMAPPING...
2698 */
2699 if (size <= (64 * 1024))
James Smart63e801c2010-11-20 23:14:19 -05002700 total_mem = full_size;
James Smart3b5dd522010-01-26 23:10:15 -05002701 else
2702 total_mem = 64 * 1024;
2703 } else
2704 /* Allocate memory for ioctl data */
2705 total_mem = BUF_SZ_4K;
2706
2707 dataout = kmalloc(total_mem, GFP_KERNEL);
2708 if (dataout == NULL) {
2709 rc = -ENOMEM;
2710 goto loopback_test_exit;
2711 }
2712
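	/* leave room at the start of the buffer for the ELX loopback header
	 * before copying in the user payload
	 */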
2713 ptr = dataout;
2714 ptr += ELX_LOOPBACK_HEADER_SZ;
2715 sg_copy_to_buffer(job->request_payload.sg_list,
2716 job->request_payload.sg_cnt,
2717 ptr, size);
James Smart3b5dd522010-01-26 23:10:15 -05002718 rc = lpfcdiag_loop_self_reg(phba, &rpi);
James Smartd439d282010-09-29 11:18:45 -04002719 if (rc)
James Smart3b5dd522010-01-26 23:10:15 -05002720 goto loopback_test_exit;
James Smart3b5dd522010-01-26 23:10:15 -05002721
2722 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2723 if (rc) {
2724 lpfcdiag_loop_self_unreg(phba, rpi);
James Smart3b5dd522010-01-26 23:10:15 -05002725 goto loopback_test_exit;
2726 }
2727
2728 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2729 if (rc) {
2730 lpfcdiag_loop_self_unreg(phba, rpi);
James Smart3b5dd522010-01-26 23:10:15 -05002731 goto loopback_test_exit;
2732 }
2733
2734 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2735 SLI_CT_ELX_LOOPBACK);
2736 if (!evt) {
2737 lpfcdiag_loop_self_unreg(phba, rpi);
2738 rc = -ENOMEM;
2739 goto loopback_test_exit;
2740 }
2741
2742 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2743 list_add(&evt->node, &phba->ct_ev_waiters);
2744 lpfc_bsg_event_ref(evt);
2745 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2746
2747 cmdiocbq = lpfc_sli_get_iocbq(phba);
2748 rspiocbq = lpfc_sli_get_iocbq(phba);
2749 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2750
2751 if (txbmp) {
2752 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
James Smartc7495932010-04-06 15:05:28 -04002753 if (txbmp->virt) {
2754 INIT_LIST_HEAD(&txbmp->list);
2755 txbpl = (struct ulp_bde64 *) txbmp->virt;
James Smart3b5dd522010-01-26 23:10:15 -05002756 txbuffer = diag_cmd_data_alloc(phba,
2757 txbpl, full_size, 0);
James Smartc7495932010-04-06 15:05:28 -04002758 }
James Smart3b5dd522010-01-26 23:10:15 -05002759 }
2760
James Smartc7495932010-04-06 15:05:28 -04002761 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
2762 !txbmp->virt) {
James Smart3b5dd522010-01-26 23:10:15 -05002763 rc = -ENOMEM;
2764 goto err_loopback_test_exit;
2765 }
2766
2767 cmd = &cmdiocbq->iocb;
2768 rsp = &rspiocbq->iocb;
2769
2770 INIT_LIST_HEAD(&head);
2771 list_add_tail(&head, &txbuffer->dma.list);
2772 list_for_each_entry(curr, &head, list) {
2773 segment_len = ((struct lpfc_dmabufext *)curr)->size;
2774 if (current_offset == 0) {
2775 ctreq = curr->virt;
2776 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2777 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2778 ctreq->RevisionId.bits.InId = 0;
2779 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2780 ctreq->FsSubType = 0;
2781 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
2782 ctreq->CommandResponse.bits.Size = size;
2783 segment_offset = ELX_LOOPBACK_HEADER_SZ;
2784 } else
2785 segment_offset = 0;
2786
2787 BUG_ON(segment_offset >= segment_len);
2788 memcpy(curr->virt + segment_offset,
2789 ptr + current_offset,
2790 segment_len - segment_offset);
2791
2792 current_offset += segment_len - segment_offset;
2793 BUG_ON(current_offset > size);
2794 }
2795 list_del(&head);
2796
2797 /* Build the XMIT_SEQUENCE iocb */
2798
2799 num_bde = (uint32_t)txbuffer->flag;
2800
2801 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
2802 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
2803 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2804 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
2805
2806 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
2807 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2808 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2809 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2810
2811 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
2812 cmd->ulpBdeCount = 1;
2813 cmd->ulpLe = 1;
2814 cmd->ulpClass = CLASS3;
2815 cmd->ulpContext = txxri;
2816
2817 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2818 cmdiocbq->vport = phba->pport;
2819
James Smartd439d282010-09-29 11:18:45 -04002820 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2821 rspiocbq, (phba->fc_ratov * 2) +
2822 LPFC_DRVR_TIMEOUT);
James Smart3b5dd522010-01-26 23:10:15 -05002823
James Smartd439d282010-09-29 11:18:45 -04002824 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
James Smart3b5dd522010-01-26 23:10:15 -05002825 rc = -EIO;
2826 goto err_loopback_test_exit;
2827 }
2828
2829 evt->waiting = 1;
James Smartd439d282010-09-29 11:18:45 -04002830 time_left = wait_event_interruptible_timeout(
James Smart3b5dd522010-01-26 23:10:15 -05002831 evt->wq, !list_empty(&evt->events_to_see),
2832 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2833 evt->waiting = 0;
2834 if (list_empty(&evt->events_to_see))
James Smartd439d282010-09-29 11:18:45 -04002835 rc = (time_left) ? -EINTR : -ETIMEDOUT;
James Smart3b5dd522010-01-26 23:10:15 -05002836 else {
2837 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2838 list_move(evt->events_to_see.prev, &evt->events_to_get);
2839 evdat = list_entry(evt->events_to_get.prev,
2840 typeof(*evdat), node);
2841 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2842 rx_databuf = evdat->data;
2843 if (evdat->len != full_size) {
2844 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2845 "1603 Loopback test did not receive expected "
2846 "data length. actual length 0x%x expected "
2847 "length 0x%x\n",
2848 evdat->len, full_size);
2849 rc = -EIO;
2850 } else if (rx_databuf == NULL)
2851 rc = -EIO;
2852 else {
2853 rc = IOCB_SUCCESS;
2854 /* skip over elx loopback header */
2855 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
2856 job->reply->reply_payload_rcv_len =
2857 sg_copy_from_buffer(job->reply_payload.sg_list,
2858 job->reply_payload.sg_cnt,
2859 rx_databuf, size);
2860 job->reply->reply_payload_rcv_len = size;
2861 }
2862 }
2863
2864err_loopback_test_exit:
2865 lpfcdiag_loop_self_unreg(phba, rpi);
2866
2867 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2868 lpfc_bsg_event_unref(evt); /* release ref */
2869 lpfc_bsg_event_unref(evt); /* delete */
2870 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2871
2872 if (cmdiocbq != NULL)
2873 lpfc_sli_release_iocbq(phba, cmdiocbq);
2874
2875 if (rspiocbq != NULL)
2876 lpfc_sli_release_iocbq(phba, rspiocbq);
2877
2878 if (txbmp != NULL) {
2879 if (txbpl != NULL) {
2880 if (txbuffer != NULL)
2881 diag_cmd_data_free(phba, txbuffer);
2882 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
2883 }
2884 kfree(txbmp);
2885 }
2886
2887loopback_test_exit:
2888 kfree(dataout);
2889 /* make error code available to userspace */
2890 job->reply->result = rc;
2891 job->dd_data = NULL;
2892 /* complete the job back to userspace if no error */
2893 if (rc == 0)
2894 job->job_done(job);
2895 return rc;
2896}
2897
2898/**
2899 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
2900 * @job: GET_DFC_REV fc_bsg_job
2901 **/
2902static int
2903lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
2904{
2905 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2906 struct lpfc_hba *phba = vport->phba;
2907 struct get_mgmt_rev *event_req;
2908 struct get_mgmt_rev_reply *event_reply;
2909 int rc = 0;
2910
2911 if (job->request_len <
2912 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
2913 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2914 "2740 Received GET_DFC_REV request below "
2915 "minimum size\n");
2916 rc = -EINVAL;
2917 goto job_error;
2918 }
2919
2920 event_req = (struct get_mgmt_rev *)
2921 job->request->rqst_data.h_vendor.vendor_cmd;
2922
2923 event_reply = (struct get_mgmt_rev_reply *)
2924 job->reply->reply_data.vendor_reply.vendor_rsp;
2925
2926 if (job->reply_len <
2927 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
2928 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2929 "2741 Received GET_DFC_REV reply below "
2930 "minimum size\n");
2931 rc = -EINVAL;
2932 goto job_error;
2933 }
2934
2935 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
2936 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
2937job_error:
2938 job->reply->result = rc;
2939 if (rc == 0)
2940 job->job_done(job);
2941 return rc;
2942}
2943
2944/**
James Smart7ad20aa2011-05-24 11:44:28 -04002945 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
James Smart3b5dd522010-01-26 23:10:15 -05002946 * @phba: Pointer to HBA context object.
2947 * @pmboxq: Pointer to mailbox command.
2948 *
2949 * This is the completion handler function for mailbox commands issued from
2950 * the lpfc_bsg_issue_mbox function. It is called by the mailbox event
2951 * handler function with no lock held. It retrieves the job context from
2952 * context1 of the mailbox, copies the mailbox contents to the job's reply
2953 * payload, and completes the job back to the mid-layer.
2954 **/
2955void
James Smart7ad20aa2011-05-24 11:44:28 -04002956lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
James Smart3b5dd522010-01-26 23:10:15 -05002957{
2958 struct bsg_job_data *dd_data;
James Smart3b5dd522010-01-26 23:10:15 -05002959 struct fc_bsg_job *job;
2960 uint32_t size;
2961 unsigned long flags;
James Smart7ad20aa2011-05-24 11:44:28 -04002962 uint8_t *pmb, *pmb_buf;
James Smart3b5dd522010-01-26 23:10:15 -05002963
2964 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2965 dd_data = pmboxq->context1;
James Smart7a470272010-03-15 11:25:20 -04002966 /* job already timed out? */
James Smart3b5dd522010-01-26 23:10:15 -05002967 if (!dd_data) {
2968 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2969 return;
2970 }
2971
James Smart7ad20aa2011-05-24 11:44:28 -04002972 /*
2973 * The outgoing buffer is readily referred from the dma buffer,
2974 * just need to get header part from mailboxq structure.
James Smart7a470272010-03-15 11:25:20 -04002975 */
James Smart7ad20aa2011-05-24 11:44:28 -04002976 pmb = (uint8_t *)&pmboxq->u.mb;
2977 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
2978 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
James Smart515e0aa2010-09-29 11:19:00 -04002979
James Smart3b5dd522010-01-26 23:10:15 -05002980 job = dd_data->context_un.mbox.set_job;
James Smart5a6f1332011-03-11 16:05:35 -05002981 if (job) {
2982 size = job->reply_payload.payload_len;
2983 job->reply->reply_payload_rcv_len =
2984 sg_copy_from_buffer(job->reply_payload.sg_list,
James Smart7ad20aa2011-05-24 11:44:28 -04002985 job->reply_payload.sg_cnt,
2986 pmb_buf, size);
James Smartb6e3b9c2011-04-16 11:03:43 -04002987 /* need to hold the lock until we set job->dd_data to NULL
2988 * to hold off the timeout handler returning to the mid-layer
2989 * while we are still processing the job.
2990 */
James Smart5a6f1332011-03-11 16:05:35 -05002991 job->dd_data = NULL;
James Smartb6e3b9c2011-04-16 11:03:43 -04002992 dd_data->context_un.mbox.set_job = NULL;
2993 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
James Smartb6e3b9c2011-04-16 11:03:43 -04002994 } else {
2995 dd_data->context_un.mbox.set_job = NULL;
2996 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
James Smart5a6f1332011-03-11 16:05:35 -05002997 }
James Smart7a470272010-03-15 11:25:20 -04002998
James Smart3b5dd522010-01-26 23:10:15 -05002999 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
James Smart7ad20aa2011-05-24 11:44:28 -04003000 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
James Smart3b5dd522010-01-26 23:10:15 -05003001 kfree(dd_data);
James Smart7ad20aa2011-05-24 11:44:28 -04003002
3003 if (job) {
3004 job->reply->result = 0;
3005 job->job_done(job);
3006 }
James Smart3b5dd522010-01-26 23:10:15 -05003007 return;
3008}
3009
3010/**
3011 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3012 * @phba: Pointer to HBA context object.
3013 * @mb: Pointer to a mailbox object.
3014 * @vport: Pointer to a vport object.
3015 *
3016 * Some commands require the port to be offline, some may not be called from
3017 * the application.
3018 **/
3019static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3020 MAILBOX_t *mb, struct lpfc_vport *vport)
3021{
3022 /* return negative error values for bsg job */
3023 switch (mb->mbxCommand) {
3024 /* Offline only */
3025 case MBX_INIT_LINK:
3026 case MBX_DOWN_LINK:
3027 case MBX_CONFIG_LINK:
3028 case MBX_CONFIG_RING:
3029 case MBX_RESET_RING:
3030 case MBX_UNREG_LOGIN:
3031 case MBX_CLEAR_LA:
3032 case MBX_DUMP_CONTEXT:
3033 case MBX_RUN_DIAGS:
3034 case MBX_RESTART:
3035 case MBX_SET_MASK:
3036 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3037 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3038 "2743 Command 0x%x is illegal in on-line "
3039 "state\n",
3040 mb->mbxCommand);
3041 return -EPERM;
3042 }
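		/* fall through - offline-only commands are allowed once the
		 * offline check above has passed
		 */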
3043 case MBX_WRITE_NV:
3044 case MBX_WRITE_VPARMS:
3045 case MBX_LOAD_SM:
3046 case MBX_READ_NV:
3047 case MBX_READ_CONFIG:
3048 case MBX_READ_RCONFIG:
3049 case MBX_READ_STATUS:
3050 case MBX_READ_XRI:
3051 case MBX_READ_REV:
3052 case MBX_READ_LNK_STAT:
3053 case MBX_DUMP_MEMORY:
3054 case MBX_DOWN_LOAD:
3055 case MBX_UPDATE_CFG:
3056 case MBX_KILL_BOARD:
3057 case MBX_LOAD_AREA:
3058 case MBX_LOAD_EXP_ROM:
3059 case MBX_BEACON:
3060 case MBX_DEL_LD_ENTRY:
3061 case MBX_SET_DEBUG:
3062 case MBX_WRITE_WWN:
3063 case MBX_SLI4_CONFIG:
James Smartc7495932010-04-06 15:05:28 -04003064 case MBX_READ_EVENT_LOG:
James Smart3b5dd522010-01-26 23:10:15 -05003065 case MBX_READ_EVENT_LOG_STATUS:
3066 case MBX_WRITE_EVENT_LOG:
3067 case MBX_PORT_CAPABILITIES:
3068 case MBX_PORT_IOV_CONTROL:
James Smart7a470272010-03-15 11:25:20 -04003069 case MBX_RUN_BIU_DIAG64:
James Smart3b5dd522010-01-26 23:10:15 -05003070 break;
3071 case MBX_SET_VARIABLE:
James Smarte2aed292010-02-26 14:15:00 -05003072 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3073 "1226 mbox: set_variable 0x%x, 0x%x\n",
3074 mb->un.varWords[0],
3075 mb->un.varWords[1]);
3076 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3077 && (mb->un.varWords[1] == 1)) {
3078 phba->wait_4_mlo_maint_flg = 1;
3079 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
3080 phba->link_flag &= ~LS_LOOPBACK_MODE;
James Smart76a95d72010-11-20 23:11:48 -05003081 phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
James Smarte2aed292010-02-26 14:15:00 -05003082 }
3083 break;
James Smart3b5dd522010-01-26 23:10:15 -05003084 case MBX_READ_SPARM64:
James Smart76a95d72010-11-20 23:11:48 -05003085 case MBX_READ_TOPOLOGY:
James Smart3b5dd522010-01-26 23:10:15 -05003086 case MBX_REG_LOGIN:
3087 case MBX_REG_LOGIN64:
3088 case MBX_CONFIG_PORT:
3089 case MBX_RUN_BIU_DIAG:
3090 default:
3091 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3092 "2742 Unknown Command 0x%x\n",
3093 mb->mbxCommand);
3094 return -EPERM;
3095 }
3096
3097 return 0; /* ok */
3098}
3099
3100/**
James Smart7ad20aa2011-05-24 11:44:28 -04003101 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3102 * @phba: Pointer to HBA context object.
3103 *
 3104 * This routine cleans up and resets the BSG handling of a multi-buffer
 3105 * mbox command session.
3106 **/
3107static void
3108lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3109{
3110 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3111 return;
3112
3113 /* free all memory, including dma buffers */
3114 lpfc_bsg_dma_page_list_free(phba,
3115 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3116 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
 3117 /* multi-buffer mailbox command pass-through session complete */
3118 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3119 sizeof(struct lpfc_mbox_ext_buf_ctx));
3120 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3121
3122 return;
3123}
3124
3125/**
3126 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3127 * @phba: Pointer to HBA context object.
3128 * @pmboxq: Pointer to mailbox command.
3129 *
 3130 * This routine handles the bsg job completion for mailbox commands with
 3131 * multiple external buffers.
3132 **/
3133static struct fc_bsg_job *
3134lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3135{
3136 struct bsg_job_data *dd_data;
3137 struct fc_bsg_job *job;
3138 uint8_t *pmb, *pmb_buf;
3139 unsigned long flags;
3140 uint32_t size;
3141 int rc = 0;
3142
3143 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3144 dd_data = pmboxq->context1;
3145 /* has the job already timed out? */
3146 if (!dd_data) {
3147 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3148 job = NULL;
3149 goto job_done_out;
3150 }
3151
3152 /*
 3153 * The outgoing buffer is already referenced through the dma buffer;
 3154 * only the mailbox header needs to be copied from the mailboxq structure.
3155 */
3156 pmb = (uint8_t *)&pmboxq->u.mb;
3157 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3158 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3159
3160 job = dd_data->context_un.mbox.set_job;
3161 if (job) {
3162 size = job->reply_payload.payload_len;
3163 job->reply->reply_payload_rcv_len =
3164 sg_copy_from_buffer(job->reply_payload.sg_list,
3165 job->reply_payload.sg_cnt,
3166 pmb_buf, size);
 3167 /* result for success */
3168 job->reply->result = 0;
3169 job->dd_data = NULL;
 3170 /* need to hold the lock until we set job->dd_data to NULL
 3171 * to keep the mid-layer timeout handler from taking any
 3172 * action while we are still processing the job.
 3173 */
3174 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3175 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3176 "2937 SLI_CONFIG ext-buffer maibox command "
3177 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3178 phba->mbox_ext_buf_ctx.nembType,
3179 phba->mbox_ext_buf_ctx.mboxType, size);
3180 } else
3181 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3182
3183job_done_out:
3184 if (!job)
3185 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3186 "2938 SLI_CONFIG ext-buffer maibox "
3187 "command (x%x/x%x) failure, rc:x%x\n",
3188 phba->mbox_ext_buf_ctx.nembType,
3189 phba->mbox_ext_buf_ctx.mboxType, rc);
3190 /* state change */
3191 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3192 kfree(dd_data);
3193
3194 return job;
3195}
3196
3197/**
3198 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3199 * @phba: Pointer to HBA context object.
3200 * @pmboxq: Pointer to mailbox command.
3201 *
 3202 * This is the completion handler for mailbox read commands with multiple
 3203 * external buffers.
3204 **/
3205static void
3206lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3207{
3208 struct fc_bsg_job *job;
3209
3210 /* handle the BSG job with mailbox command */
3211 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3212 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3213
3214 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3215 "2939 SLI_CONFIG ext-buffer rd maibox command "
3216 "complete, ctxState:x%x, mbxStatus:x%x\n",
3217 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3218
3219 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3220
3221 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3222 lpfc_bsg_mbox_ext_session_reset(phba);
3223
3224 /* free base driver mailbox structure memory */
3225 mempool_free(pmboxq, phba->mbox_mem_pool);
3226
3227 /* complete the bsg job if we have it */
3228 if (job)
3229 job->job_done(job);
3230
3231 return;
3232}
3233
3234/**
3235 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3236 * @phba: Pointer to HBA context object.
3237 * @pmboxq: Pointer to mailbox command.
3238 *
 3239 * This is the completion handler for mailbox write commands with multiple
 3240 * external buffers.
3241 **/
3242static void
3243lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3244{
3245 struct fc_bsg_job *job;
3246
3247 /* handle the BSG job with the mailbox command */
3248 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3249 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3250
3251 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3252 "2940 SLI_CONFIG ext-buffer wr maibox command "
3253 "complete, ctxState:x%x, mbxStatus:x%x\n",
3254 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3255
3256 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3257
3258 /* free all memory, including dma buffers */
3259 mempool_free(pmboxq, phba->mbox_mem_pool);
3260 lpfc_bsg_mbox_ext_session_reset(phba);
3261
3262 /* complete the bsg job if we have it */
3263 if (job)
3264 job->job_done(job);
3265
3266 return;
3267}
3268
3269static void
3270lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3271 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3272 struct lpfc_dmabuf *ext_dmabuf)
3273{
3274 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3275
3276 /* pointer to the start of mailbox command */
3277 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3278
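	/*
	 * Descriptor 0 points just past the mailbox header inside the mailbox
	 * DMA buffer itself; subsequent descriptors point at the separately
	 * allocated external DMA buffers.
	 */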
3279 if (nemb_tp == nemb_mse) {
3280 if (index == 0) {
3281 sli_cfg_mbx->un.sli_config_emb0_subsys.
3282 mse[index].pa_hi =
3283 putPaddrHigh(mbx_dmabuf->phys +
3284 sizeof(MAILBOX_t));
3285 sli_cfg_mbx->un.sli_config_emb0_subsys.
3286 mse[index].pa_lo =
3287 putPaddrLow(mbx_dmabuf->phys +
3288 sizeof(MAILBOX_t));
3289 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3290 "2943 SLI_CONFIG(mse)[%d], "
3291 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3292 index,
3293 sli_cfg_mbx->un.sli_config_emb0_subsys.
3294 mse[index].buf_len,
3295 sli_cfg_mbx->un.sli_config_emb0_subsys.
3296 mse[index].pa_hi,
3297 sli_cfg_mbx->un.sli_config_emb0_subsys.
3298 mse[index].pa_lo);
3299 } else {
3300 sli_cfg_mbx->un.sli_config_emb0_subsys.
3301 mse[index].pa_hi =
3302 putPaddrHigh(ext_dmabuf->phys);
3303 sli_cfg_mbx->un.sli_config_emb0_subsys.
3304 mse[index].pa_lo =
3305 putPaddrLow(ext_dmabuf->phys);
3306 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3307 "2944 SLI_CONFIG(mse)[%d], "
3308 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3309 index,
3310 sli_cfg_mbx->un.sli_config_emb0_subsys.
3311 mse[index].buf_len,
3312 sli_cfg_mbx->un.sli_config_emb0_subsys.
3313 mse[index].pa_hi,
3314 sli_cfg_mbx->un.sli_config_emb0_subsys.
3315 mse[index].pa_lo);
3316 }
3317 } else {
3318 if (index == 0) {
3319 sli_cfg_mbx->un.sli_config_emb1_subsys.
3320 hbd[index].pa_hi =
3321 putPaddrHigh(mbx_dmabuf->phys +
3322 sizeof(MAILBOX_t));
3323 sli_cfg_mbx->un.sli_config_emb1_subsys.
3324 hbd[index].pa_lo =
3325 putPaddrLow(mbx_dmabuf->phys +
3326 sizeof(MAILBOX_t));
3327 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3328 "3007 SLI_CONFIG(hbd)[%d], "
3329 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3330 index,
3331 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3332 &sli_cfg_mbx->un.
3333 sli_config_emb1_subsys.hbd[index]),
3334 sli_cfg_mbx->un.sli_config_emb1_subsys.
3335 hbd[index].pa_hi,
3336 sli_cfg_mbx->un.sli_config_emb1_subsys.
3337 hbd[index].pa_lo);
3338
3339 } else {
3340 sli_cfg_mbx->un.sli_config_emb1_subsys.
3341 hbd[index].pa_hi =
3342 putPaddrHigh(ext_dmabuf->phys);
3343 sli_cfg_mbx->un.sli_config_emb1_subsys.
3344 hbd[index].pa_lo =
3345 putPaddrLow(ext_dmabuf->phys);
3346 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3347 "3008 SLI_CONFIG(hbd)[%d], "
3348 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3349 index,
3350 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3351 &sli_cfg_mbx->un.
3352 sli_config_emb1_subsys.hbd[index]),
3353 sli_cfg_mbx->un.sli_config_emb1_subsys.
3354 hbd[index].pa_hi,
3355 sli_cfg_mbx->un.sli_config_emb1_subsys.
3356 hbd[index].pa_lo);
3357 }
3358 }
3359 return;
3360}
3361
3362/**
 3363 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
 3364 * @phba: Pointer to HBA context object.
 3365 * @job: Pointer to the fc_bsg_job carrying the mailbox request.
 3366 * @nemb_tp: Enumerated type of the non-embedded mailbox command.
 3367 * @dmabuf: Pointer to a DMA buffer descriptor.
 3368 *
 3369 * This routine performs the SLI_CONFIG (0x9B) read mailbox command operation
 3370 * with non-embedded external buffers.
3371 **/
3372static int
3373lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3374 enum nemb_type nemb_tp,
3375 struct lpfc_dmabuf *dmabuf)
3376{
3377 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3378 struct dfc_mbox_req *mbox_req;
3379 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3380 uint32_t ext_buf_cnt, ext_buf_index;
3381 struct lpfc_dmabuf *ext_dmabuf = NULL;
3382 struct bsg_job_data *dd_data = NULL;
3383 LPFC_MBOXQ_t *pmboxq = NULL;
3384 MAILBOX_t *pmb;
3385 uint8_t *pmbx;
3386 int rc, i;
3387
3388 mbox_req =
3389 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3390
3391 /* pointer to the start of mailbox command */
3392 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3393
3394 if (nemb_tp == nemb_mse) {
3395 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3396 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3397 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3398 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3399 "2945 Handled SLI_CONFIG(mse) rd, "
3400 "ext_buf_cnt(%d) out of range(%d)\n",
3401 ext_buf_cnt,
3402 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3403 rc = -ERANGE;
3404 goto job_error;
3405 }
3406 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3407 "2941 Handled SLI_CONFIG(mse) rd, "
3408 "ext_buf_cnt:%d\n", ext_buf_cnt);
3409 } else {
3410 /* sanity check on interface type for support */
3411 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3412 LPFC_SLI_INTF_IF_TYPE_2) {
3413 rc = -ENODEV;
3414 goto job_error;
3415 }
3416 /* nemb_tp == nemb_hbd */
3417 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3418 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3419 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3420 "2946 Handled SLI_CONFIG(hbd) rd, "
3421 "ext_buf_cnt(%d) out of range(%d)\n",
3422 ext_buf_cnt,
3423 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3424 rc = -ERANGE;
3425 goto job_error;
3426 }
3427 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3428 "2942 Handled SLI_CONFIG(hbd) rd, "
3429 "ext_buf_cnt:%d\n", ext_buf_cnt);
3430 }
3431
 3432 /* reject a non-embedded mailbox command with no external buffer */
3433 if (ext_buf_cnt == 0) {
3434 rc = -EPERM;
3435 goto job_error;
3436 } else if (ext_buf_cnt > 1) {
3437 /* additional external read buffers */
3438 for (i = 1; i < ext_buf_cnt; i++) {
3439 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3440 if (!ext_dmabuf) {
3441 rc = -ENOMEM;
3442 goto job_error;
3443 }
3444 list_add_tail(&ext_dmabuf->list,
3445 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3446 }
3447 }
3448
3449 /* bsg tracking structure */
3450 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3451 if (!dd_data) {
3452 rc = -ENOMEM;
3453 goto job_error;
3454 }
3455
3456 /* mailbox command structure for base driver */
3457 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3458 if (!pmboxq) {
3459 rc = -ENOMEM;
3460 goto job_error;
3461 }
3462 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3463
3464 /* for the first external buffer */
3465 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3466
3467 /* for the rest of external buffer descriptors if any */
3468 if (ext_buf_cnt > 1) {
3469 ext_buf_index = 1;
3470 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3471 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3472 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3473 ext_buf_index, dmabuf,
3474 curr_dmabuf);
3475 ext_buf_index++;
3476 }
3477 }
3478
3479 /* construct base driver mbox command */
3480 pmb = &pmboxq->u.mb;
3481 pmbx = (uint8_t *)dmabuf->virt;
3482 memcpy(pmb, pmbx, sizeof(*pmb));
3483 pmb->mbxOwner = OWN_HOST;
3484 pmboxq->vport = phba->pport;
3485
3486 /* multi-buffer handling context */
3487 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3488 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3489 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3490 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3491 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3492 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3493
3494 /* callback for multi-buffer read mailbox command */
3495 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3496
3497 /* context fields to callback function */
3498 pmboxq->context1 = dd_data;
3499 dd_data->type = TYPE_MBOX;
3500 dd_data->context_un.mbox.pmboxq = pmboxq;
3501 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3502 dd_data->context_un.mbox.set_job = job;
3503 job->dd_data = dd_data;
3504
3505 /* state change */
3506 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3507
3508 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3509 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3510 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3511 "2947 Issued SLI_CONFIG ext-buffer "
3512 "maibox command, rc:x%x\n", rc);
James Smart88a2cfb2011-07-22 18:36:33 -04003513 return SLI_CONFIG_HANDLED;
James Smart7ad20aa2011-05-24 11:44:28 -04003514 }
3515 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3516 "2948 Failed to issue SLI_CONFIG ext-buffer "
3517 "maibox command, rc:x%x\n", rc);
3518 rc = -EPIPE;
3519
3520job_error:
3521 if (pmboxq)
3522 mempool_free(pmboxq, phba->mbox_mem_pool);
3523 lpfc_bsg_dma_page_list_free(phba,
3524 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3525 kfree(dd_data);
3526 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3527 return rc;
3528}
3529
3530/**
3531 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
 3532 * @phba: Pointer to HBA context object.
 3533 * @job: Pointer to the fc_bsg_job carrying the mailbox request.
 3534 * @dmabuf: Pointer to a DMA buffer descriptor.
 3535 *
 3536 * This routine performs the SLI_CONFIG (0x9B) write mailbox command operation
 3537 * with non-embedded external buffers.
3538 **/
3539static int
3540lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3541 enum nemb_type nemb_tp,
3542 struct lpfc_dmabuf *dmabuf)
3543{
3544 struct dfc_mbox_req *mbox_req;
3545 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3546 uint32_t ext_buf_cnt;
3547 struct bsg_job_data *dd_data = NULL;
3548 LPFC_MBOXQ_t *pmboxq = NULL;
3549 MAILBOX_t *pmb;
3550 uint8_t *mbx;
James Smart88a2cfb2011-07-22 18:36:33 -04003551 int rc = SLI_CONFIG_NOT_HANDLED, i;
James Smart7ad20aa2011-05-24 11:44:28 -04003552
3553 mbox_req =
3554 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3555
3556 /* pointer to the start of mailbox command */
3557 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3558
3559 if (nemb_tp == nemb_mse) {
3560 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3561 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3562 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3563 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3564 "2953 Handled SLI_CONFIG(mse) wr, "
3565 "ext_buf_cnt(%d) out of range(%d)\n",
3566 ext_buf_cnt,
3567 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3568 return -ERANGE;
3569 }
3570 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3571 "2949 Handled SLI_CONFIG(mse) wr, "
3572 "ext_buf_cnt:%d\n", ext_buf_cnt);
3573 } else {
3574 /* sanity check on interface type for support */
3575 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3576 LPFC_SLI_INTF_IF_TYPE_2)
3577 return -ENODEV;
3578 /* nemb_tp == nemb_hbd */
3579 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3580 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3581 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3582 "2954 Handled SLI_CONFIG(hbd) wr, "
3583 "ext_buf_cnt(%d) out of range(%d)\n",
3584 ext_buf_cnt,
3585 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3586 return -ERANGE;
3587 }
3588 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3589 "2950 Handled SLI_CONFIG(hbd) wr, "
3590 "ext_buf_cnt:%d\n", ext_buf_cnt);
3591 }
3592
3593 if (ext_buf_cnt == 0)
3594 return -EPERM;
3595
3596 /* for the first external buffer */
3597 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3598
 3599 /* log the additional external buffer lengths for debugging */
3600 for (i = 1; i < ext_buf_cnt; i++) {
3601 if (nemb_tp == nemb_mse)
3602 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3603 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3604 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3605 mse[i].buf_len);
3606 else
3607 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3608 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3609 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3610 &sli_cfg_mbx->un.sli_config_emb1_subsys.
3611 hbd[i]));
3612 }
3613
3614 /* multi-buffer handling context */
3615 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3616 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3617 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3618 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3619 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3620 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3621
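	/*
	 * With a single external buffer the write mailbox command can be
	 * issued to the port right away; with multiple buffers this bsg job
	 * is completed below and the remaining buffers are collected from
	 * follow-up bsg requests before the command is issued.
	 */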
3622 if (ext_buf_cnt == 1) {
3623 /* bsg tracking structure */
3624 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3625 if (!dd_data) {
3626 rc = -ENOMEM;
3627 goto job_error;
3628 }
3629
3630 /* mailbox command structure for base driver */
3631 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3632 if (!pmboxq) {
3633 rc = -ENOMEM;
3634 goto job_error;
3635 }
3636 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3637 pmb = &pmboxq->u.mb;
3638 mbx = (uint8_t *)dmabuf->virt;
3639 memcpy(pmb, mbx, sizeof(*pmb));
3640 pmb->mbxOwner = OWN_HOST;
3641 pmboxq->vport = phba->pport;
3642
 3643 /* callback for multi-buffer write mailbox command */
3644 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3645
3646 /* context fields to callback function */
3647 pmboxq->context1 = dd_data;
3648 dd_data->type = TYPE_MBOX;
3649 dd_data->context_un.mbox.pmboxq = pmboxq;
3650 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3651 dd_data->context_un.mbox.set_job = job;
3652 job->dd_data = dd_data;
3653
3654 /* state change */
3655 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3656
3657 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3658 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3659 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3660 "2955 Issued SLI_CONFIG ext-buffer "
3661 "maibox command, rc:x%x\n", rc);
James Smart88a2cfb2011-07-22 18:36:33 -04003662 return SLI_CONFIG_HANDLED;
James Smart7ad20aa2011-05-24 11:44:28 -04003663 }
3664 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3665 "2956 Failed to issue SLI_CONFIG ext-buffer "
3666 "maibox command, rc:x%x\n", rc);
3667 rc = -EPIPE;
3668 }
3669
James Smart88a2cfb2011-07-22 18:36:33 -04003670 /* wait for additional external buffers */
3671 job->reply->result = 0;
3672 job->job_done(job);
3673 return SLI_CONFIG_HANDLED;
3674
James Smart7ad20aa2011-05-24 11:44:28 -04003675job_error:
3676 if (pmboxq)
3677 mempool_free(pmboxq, phba->mbox_mem_pool);
3678 kfree(dd_data);
3679
3680 return rc;
3681}
3682
3683/**
3684 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
3685 * @phba: Pointer to HBA context object.
 3686 * @job: Pointer to the fc_bsg_job carrying the mailbox request.
 3687 * @dmabuf: Pointer to a DMA buffer descriptor.
 3688 *
 3689 * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
 3690 * external buffers, covering both 0x9B with non-embedded MSEs and 0x9B with
 3691 * embedded subsystem 0x1 and opcodes with external HBDs.
3692 **/
3693static int
3694lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3695 struct lpfc_dmabuf *dmabuf)
3696{
3697 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3698 uint32_t subsys;
3699 uint32_t opcode;
3700 int rc = SLI_CONFIG_NOT_HANDLED;
3701
3702 /* state change */
3703 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3704
3705 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3706
3707 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3708 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3709 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3710 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3711 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3712 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3713 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
3714 switch (opcode) {
3715 case FCOE_OPCODE_READ_FCF:
3716 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3717 "2957 Handled SLI_CONFIG "
3718 "subsys_fcoe, opcode:x%x\n",
3719 opcode);
3720 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3721 nemb_mse, dmabuf);
3722 break;
3723 case FCOE_OPCODE_ADD_FCF:
3724 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3725 "2958 Handled SLI_CONFIG "
3726 "subsys_fcoe, opcode:x%x\n",
3727 opcode);
3728 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3729 nemb_mse, dmabuf);
3730 break;
3731 default:
3732 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3733 "2959 Not handled SLI_CONFIG "
3734 "subsys_fcoe, opcode:x%x\n",
3735 opcode);
3736 rc = SLI_CONFIG_NOT_HANDLED;
3737 break;
3738 }
3739 } else {
3740 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3741 "2977 Handled SLI_CONFIG "
3742 "subsys:x%d, opcode:x%x\n",
3743 subsys, opcode);
3744 rc = SLI_CONFIG_NOT_HANDLED;
3745 }
3746 } else {
3747 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
3748 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3749 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
3750 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3751 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
3752 switch (opcode) {
3753 case COMN_OPCODE_READ_OBJECT:
3754 case COMN_OPCODE_READ_OBJECT_LIST:
3755 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3756 "2960 Handled SLI_CONFIG "
3757 "subsys_comn, opcode:x%x\n",
3758 opcode);
3759 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3760 nemb_hbd, dmabuf);
3761 break;
3762 case COMN_OPCODE_WRITE_OBJECT:
3763 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3764 "2961 Handled SLI_CONFIG "
3765 "subsys_comn, opcode:x%x\n",
3766 opcode);
3767 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3768 nemb_hbd, dmabuf);
3769 break;
3770 default:
3771 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3772 "2962 Not handled SLI_CONFIG "
3773 "subsys_comn, opcode:x%x\n",
3774 opcode);
3775 rc = SLI_CONFIG_NOT_HANDLED;
3776 break;
3777 }
3778 } else {
3779 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3780 "2978 Handled SLI_CONFIG "
3781 "subsys:x%d, opcode:x%x\n",
3782 subsys, opcode);
3783 rc = SLI_CONFIG_NOT_HANDLED;
3784 }
3785 }
3786 return rc;
3787}
3788
3789/**
 3790 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
 3791 * @phba: Pointer to HBA context object.
 3792 *
 3793 * This routine requests the abort of a pass-through mailbox command with
 3794 * multiple external buffers due to an error condition.
3795 **/
3796static void
3797lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
3798{
3799 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
3800 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
3801 else
3802 lpfc_bsg_mbox_ext_session_reset(phba);
3803 return;
3804}
3805
3806/**
3807 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
3808 * @phba: Pointer to HBA context object.
 3809 * @job: Pointer to the fc_bsg_job requesting the buffer.
 3810 *
 3811 * This routine returns the next mailbox read external buffer to user
 3812 * space through BSG.
3813 **/
3814static int
3815lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
3816{
3817 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3818 struct lpfc_dmabuf *dmabuf;
3819 uint8_t *pbuf;
3820 uint32_t size;
3821 uint32_t index;
3822
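	/* seqNum counts the external buffers already returned, so it is also
	 * the index of the next buffer to copy back to user space
	 */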
3823 index = phba->mbox_ext_buf_ctx.seqNum;
3824 phba->mbox_ext_buf_ctx.seqNum++;
3825
3826 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3827 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3828
3829 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3830 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
3831 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
3832 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3833 "2963 SLI_CONFIG (mse) ext-buffer rd get "
3834 "buffer[%d], size:%d\n", index, size);
3835 } else {
3836 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3837 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
3838 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3839 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
3840 "buffer[%d], size:%d\n", index, size);
3841 }
3842 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
3843 return -EPIPE;
3844 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
3845 struct lpfc_dmabuf, list);
3846 list_del_init(&dmabuf->list);
3847 pbuf = (uint8_t *)dmabuf->virt;
3848 job->reply->reply_payload_rcv_len =
3849 sg_copy_from_buffer(job->reply_payload.sg_list,
3850 job->reply_payload.sg_cnt,
3851 pbuf, size);
3852
3853 lpfc_bsg_dma_page_free(phba, dmabuf);
3854
3855 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3856 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3857 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
3858 "command session done\n");
3859 lpfc_bsg_mbox_ext_session_reset(phba);
3860 }
3861
3862 job->reply->result = 0;
3863 job->job_done(job);
3864
3865 return SLI_CONFIG_HANDLED;
3866}
3867
3868/**
3869 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
3870 * @phba: Pointer to HBA context object.
3871 * @dmabuf: Pointer to a DMA buffer descriptor.
3872 *
 3873 * This routine sets up the next mailbox write external buffer obtained
3874 * from user space through BSG.
3875 **/
3876static int
3877lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3878 struct lpfc_dmabuf *dmabuf)
3879{
3880 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3881 struct bsg_job_data *dd_data = NULL;
3882 LPFC_MBOXQ_t *pmboxq = NULL;
3883 MAILBOX_t *pmb;
3884 enum nemb_type nemb_tp;
3885 uint8_t *pbuf;
3886 uint32_t size;
3887 uint32_t index;
3888 int rc;
3889
3890 index = phba->mbox_ext_buf_ctx.seqNum;
3891 phba->mbox_ext_buf_ctx.seqNum++;
3892 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
3893
3894 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3895 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3896
3897 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3898 if (!dd_data) {
3899 rc = -ENOMEM;
3900 goto job_error;
3901 }
3902
3903 pbuf = (uint8_t *)dmabuf->virt;
3904 size = job->request_payload.payload_len;
3905 sg_copy_to_buffer(job->request_payload.sg_list,
3906 job->request_payload.sg_cnt,
3907 pbuf, size);
3908
3909 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3910 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3911 "2966 SLI_CONFIG (mse) ext-buffer wr set "
3912 "buffer[%d], size:%d\n",
3913 phba->mbox_ext_buf_ctx.seqNum, size);
3914
3915 } else {
3916 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3917 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
3918 "buffer[%d], size:%d\n",
3919 phba->mbox_ext_buf_ctx.seqNum, size);
3920
3921 }
3922
3923 /* set up external buffer descriptor and add to external buffer list */
3924 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
3925 phba->mbox_ext_buf_ctx.mbx_dmabuf,
3926 dmabuf);
3927 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3928
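	/* once all expected external buffers have been received, issue the
	 * accumulated mailbox command to the port; until then just keep
	 * collecting buffers
	 */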
3929 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3930 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3931 "2968 SLI_CONFIG ext-buffer wr all %d "
3932 "ebuffers received\n",
3933 phba->mbox_ext_buf_ctx.numBuf);
3934 /* mailbox command structure for base driver */
3935 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3936 if (!pmboxq) {
3937 rc = -ENOMEM;
3938 goto job_error;
3939 }
3940 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3941 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3942 pmb = &pmboxq->u.mb;
3943 memcpy(pmb, pbuf, sizeof(*pmb));
3944 pmb->mbxOwner = OWN_HOST;
3945 pmboxq->vport = phba->pport;
3946
3947 /* callback for multi-buffer write mailbox command */
3948 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3949
3950 /* context fields to callback function */
3951 pmboxq->context1 = dd_data;
3952 dd_data->type = TYPE_MBOX;
3953 dd_data->context_un.mbox.pmboxq = pmboxq;
3954 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
3955 dd_data->context_un.mbox.set_job = job;
3956 job->dd_data = dd_data;
3957
3958 /* state change */
3959 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3960
3961 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3962 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3963 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3964 "2969 Issued SLI_CONFIG ext-buffer "
3965 "maibox command, rc:x%x\n", rc);
James Smart88a2cfb2011-07-22 18:36:33 -04003966 return SLI_CONFIG_HANDLED;
James Smart7ad20aa2011-05-24 11:44:28 -04003967 }
3968 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3969 "2970 Failed to issue SLI_CONFIG ext-buffer "
3970 "maibox command, rc:x%x\n", rc);
3971 rc = -EPIPE;
3972 goto job_error;
3973 }
3974
 3975 /* wait for additional external buffers */
3976 job->reply->result = 0;
3977 job->job_done(job);
3978 return SLI_CONFIG_HANDLED;
3979
3980job_error:
3981 lpfc_bsg_dma_page_free(phba, dmabuf);
3982 kfree(dd_data);
3983
3984 return rc;
3985}
3986
3987/**
3988 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
3989 * @phba: Pointer to HBA context object.
 3990 * @job: Pointer to the fc_bsg_job carrying the external buffer.
 3991 * @dmabuf: Pointer to a DMA buffer descriptor.
 3992 *
 3993 * This routine handles an external buffer for an SLI_CONFIG (0x9B) mailbox
 3994 * command with multiple non-embedded external buffers.
3995 **/
3996static int
3997lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
3998 struct lpfc_dmabuf *dmabuf)
3999{
4000 int rc;
4001
4002 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4003 "2971 SLI_CONFIG buffer (type:x%x)\n",
4004 phba->mbox_ext_buf_ctx.mboxType);
4005
4006 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4007 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4008 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4009 "2972 SLI_CONFIG rd buffer state "
4010 "mismatch:x%x\n",
4011 phba->mbox_ext_buf_ctx.state);
4012 lpfc_bsg_mbox_ext_abort(phba);
4013 return -EPIPE;
4014 }
4015 rc = lpfc_bsg_read_ebuf_get(phba, job);
4016 if (rc == SLI_CONFIG_HANDLED)
4017 lpfc_bsg_dma_page_free(phba, dmabuf);
4018 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4019 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4020 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4021 "2973 SLI_CONFIG wr buffer state "
4022 "mismatch:x%x\n",
4023 phba->mbox_ext_buf_ctx.state);
4024 lpfc_bsg_mbox_ext_abort(phba);
4025 return -EPIPE;
4026 }
4027 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4028 }
4029 return rc;
4030}
4031
4032/**
4033 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4034 * @phba: Pointer to HBA context object.
 4035 * @job: Pointer to the fc_bsg_job carrying the mailbox request.
 4036 * @dmabuf: Pointer to a DMA buffer descriptor.
 4037 *
 4038 * This routine checks for and handles non-embedded multi-buffer SLI_CONFIG
 4039 * (0x9B) mailbox commands and their external buffers.
4040 **/
4041static int
4042lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4043 struct lpfc_dmabuf *dmabuf)
4044{
4045 struct dfc_mbox_req *mbox_req;
James Smart88a2cfb2011-07-22 18:36:33 -04004046 int rc = SLI_CONFIG_NOT_HANDLED;
James Smart7ad20aa2011-05-24 11:44:28 -04004047
4048 mbox_req =
4049 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4050
4051 /* mbox command with/without single external buffer */
4052 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
James Smart88a2cfb2011-07-22 18:36:33 -04004053 return rc;
James Smart7ad20aa2011-05-24 11:44:28 -04004054
4055 /* mbox command and first external buffer */
4056 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4057 if (mbox_req->extSeqNum == 1) {
4058 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4059 "2974 SLI_CONFIG mailbox: tag:%d, "
4060 "seq:%d\n", mbox_req->extMboxTag,
4061 mbox_req->extSeqNum);
4062 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4063 return rc;
4064 } else
4065 goto sli_cfg_ext_error;
4066 }
4067
4068 /*
4069 * handle additional external buffers
4070 */
4071
4072 /* check broken pipe conditions */
4073 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4074 goto sli_cfg_ext_error;
4075 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4076 goto sli_cfg_ext_error;
4077 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4078 goto sli_cfg_ext_error;
4079
4080 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4081 "2975 SLI_CONFIG mailbox external buffer: "
4082 "extSta:x%x, tag:%d, seq:%d\n",
4083 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4084 mbox_req->extSeqNum);
4085 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4086 return rc;
4087
4088sli_cfg_ext_error:
4089 /* all other cases, broken pipe */
4090 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4091 "2976 SLI_CONFIG mailbox broken pipe: "
4092 "ctxSta:x%x, ctxNumBuf:%d "
4093 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4094 phba->mbox_ext_buf_ctx.state,
4095 phba->mbox_ext_buf_ctx.numBuf,
4096 phba->mbox_ext_buf_ctx.mbxTag,
4097 phba->mbox_ext_buf_ctx.seqNum,
4098 mbox_req->extMboxTag, mbox_req->extSeqNum);
4099
4100 lpfc_bsg_mbox_ext_session_reset(phba);
4101
4102 return -EPIPE;
4103}
4104
4105/**
James Smart3b5dd522010-01-26 23:10:15 -05004106 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4107 * @phba: Pointer to HBA context object.
 4108 * @job: Pointer to the fc_bsg_job carrying the mailbox command.
4109 * @vport: Pointer to a vport object.
4110 *
4111 * Allocate a tracking object, mailbox command memory, get a mailbox
4112 * from the mailbox pool, copy the caller mailbox command.
4113 *
 4114 * If the port is offline while the SLI is still active, poll for the
 4115 * command (the port is being reset) and complete the job inline; otherwise
 4116 * issue the mailbox command and let our completion handler finish the job.
4117 **/
4118static int
4119lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4120 struct lpfc_vport *vport)
4121{
James Smart7a470272010-03-15 11:25:20 -04004122 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4123 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4124 /* a 4k buffer to hold the mb and extended data from/to the bsg */
James Smart7ad20aa2011-05-24 11:44:28 -04004125 uint8_t *pmbx = NULL;
James Smart7a470272010-03-15 11:25:20 -04004126 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
James Smart7ad20aa2011-05-24 11:44:28 -04004127 struct lpfc_dmabuf *dmabuf = NULL;
4128 struct dfc_mbox_req *mbox_req;
James Smartb6e3b9c2011-04-16 11:03:43 -04004129 struct READ_EVENT_LOG_VAR *rdEventLog;
4130 uint32_t transmit_length, receive_length, mode;
James Smart7ad20aa2011-05-24 11:44:28 -04004131 struct lpfc_mbx_sli4_config *sli4_config;
James Smartb6e3b9c2011-04-16 11:03:43 -04004132 struct lpfc_mbx_nembed_cmd *nembed_sge;
4133 struct mbox_header *header;
4134 struct ulp_bde64 *bde;
James Smart7a470272010-03-15 11:25:20 -04004135 uint8_t *ext = NULL;
James Smart3b5dd522010-01-26 23:10:15 -05004136 int rc = 0;
James Smart7a470272010-03-15 11:25:20 -04004137 uint8_t *from;
James Smart7ad20aa2011-05-24 11:44:28 -04004138 uint32_t size;
4139
James Smart7a470272010-03-15 11:25:20 -04004140
4141 /* in case no data is transferred */
4142 job->reply->reply_payload_rcv_len = 0;
4143
James Smartb6e3b9c2011-04-16 11:03:43 -04004144 /* sanity check to protect driver */
4145 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4146 job->request_payload.payload_len > BSG_MBOX_SIZE) {
4147 rc = -ERANGE;
4148 goto job_done;
4149 }
4150
James Smart7ad20aa2011-05-24 11:44:28 -04004151 /*
4152 * Don't allow mailbox commands to be sent when blocked or when in
4153 * the middle of discovery
4154 */
4155 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4156 rc = -EAGAIN;
4157 goto job_done;
4158 }
4159
4160 mbox_req =
4161 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4162
James Smart7a470272010-03-15 11:25:20 -04004163 /* check if requested extended data lengths are valid */
James Smartb6e3b9c2011-04-16 11:03:43 -04004164 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4165 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
James Smart7a470272010-03-15 11:25:20 -04004166 rc = -ERANGE;
4167 goto job_done;
4168 }
James Smart3b5dd522010-01-26 23:10:15 -05004169
James Smart7ad20aa2011-05-24 11:44:28 -04004170 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4171 if (!dmabuf || !dmabuf->virt) {
4172 rc = -ENOMEM;
4173 goto job_done;
4174 }
4175
4176 /* Get the mailbox command or external buffer from BSG */
4177 pmbx = (uint8_t *)dmabuf->virt;
4178 size = job->request_payload.payload_len;
4179 sg_copy_to_buffer(job->request_payload.sg_list,
4180 job->request_payload.sg_cnt, pmbx, size);
4181
4182 /* Handle possible SLI_CONFIG with non-embedded payloads */
4183 if (phba->sli_rev == LPFC_SLI_REV4) {
4184 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4185 if (rc == SLI_CONFIG_HANDLED)
4186 goto job_cont;
4187 if (rc)
4188 goto job_done;
4189 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4190 }
4191
4192 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4193 if (rc != 0)
4194 goto job_done; /* must be negative */
4195
James Smart3b5dd522010-01-26 23:10:15 -05004196 /* allocate our bsg tracking structure */
4197 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4198 if (!dd_data) {
4199 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4200 "2727 Failed allocation of dd_data\n");
James Smart7a470272010-03-15 11:25:20 -04004201 rc = -ENOMEM;
4202 goto job_done;
James Smart3b5dd522010-01-26 23:10:15 -05004203 }
4204
James Smart3b5dd522010-01-26 23:10:15 -05004205 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4206 if (!pmboxq) {
James Smart7a470272010-03-15 11:25:20 -04004207 rc = -ENOMEM;
4208 goto job_done;
James Smart3b5dd522010-01-26 23:10:15 -05004209 }
James Smart7a470272010-03-15 11:25:20 -04004210 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
James Smart3b5dd522010-01-26 23:10:15 -05004211
James Smart3b5dd522010-01-26 23:10:15 -05004212 pmb = &pmboxq->u.mb;
James Smart7ad20aa2011-05-24 11:44:28 -04004213 memcpy(pmb, pmbx, sizeof(*pmb));
James Smart3b5dd522010-01-26 23:10:15 -05004214 pmb->mbxOwner = OWN_HOST;
James Smart3b5dd522010-01-26 23:10:15 -05004215 pmboxq->vport = vport;
4216
James Smartc7495932010-04-06 15:05:28 -04004217 /* If the HBA encountered an error attention, warn when any mailbox
 4218 * command other than DUMP, RESTART, WRITE_VPARMS or WRITE_WWN is issued
 4219 * before the HBA has been restarted. */
4220 if (phba->pport->stopped &&
4221 pmb->mbxCommand != MBX_DUMP_MEMORY &&
4222 pmb->mbxCommand != MBX_RESTART &&
4223 pmb->mbxCommand != MBX_WRITE_VPARMS &&
4224 pmb->mbxCommand != MBX_WRITE_WWN)
4225 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4226 "2797 mbox: Issued mailbox cmd "
4227 "0x%x while in stopped state.\n",
4228 pmb->mbxCommand);
4229
James Smart7a470272010-03-15 11:25:20 -04004230 /* extended mailbox commands will need an extended buffer */
James Smartc7495932010-04-06 15:05:28 -04004231 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
James Smart7a470272010-03-15 11:25:20 -04004232 /* any data for the device? */
4233 if (mbox_req->inExtWLen) {
James Smart7ad20aa2011-05-24 11:44:28 -04004234 from = pmbx;
4235 ext = from + sizeof(MAILBOX_t);
James Smart7a470272010-03-15 11:25:20 -04004236 }
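		/* the extended data, when present, sits immediately after the
		 * MAILBOX_t header in the DMA page copied from the bsg request
		 * payload
		 */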
James Smart7a470272010-03-15 11:25:20 -04004237 pmboxq->context2 = ext;
4238 pmboxq->in_ext_byte_len =
James Smart7a470272010-03-15 11:25:20 -04004239 mbox_req->inExtWLen * sizeof(uint32_t);
4240 pmboxq->out_ext_byte_len =
James Smartc7495932010-04-06 15:05:28 -04004241 mbox_req->outExtWLen * sizeof(uint32_t);
James Smart7a470272010-03-15 11:25:20 -04004242 pmboxq->mbox_offset_word = mbox_req->mbOffset;
4243 }
4244
4245 /* biu diag will need a kernel buffer to transfer the data
4246 * allocate our own buffer and setup the mailbox command to
4247 * use ours
4248 */
4249 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
James Smartb6e3b9c2011-04-16 11:03:43 -04004250 transmit_length = pmb->un.varWords[1];
4251 receive_length = pmb->un.varWords[4];
James Smartc7495932010-04-06 15:05:28 -04004252 /* transmit length cannot be greater than receive length or
4253 * mailbox extension size
4254 */
4255 if ((transmit_length > receive_length) ||
James Smart88a2cfb2011-07-22 18:36:33 -04004256 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
James Smartc7495932010-04-06 15:05:28 -04004257 rc = -ERANGE;
4258 goto job_done;
4259 }
James Smart7a470272010-03-15 11:25:20 -04004260 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
James Smart7ad20aa2011-05-24 11:44:28 -04004261 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
James Smart7a470272010-03-15 11:25:20 -04004262 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
James Smart7ad20aa2011-05-24 11:44:28 -04004263 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
James Smart7a470272010-03-15 11:25:20 -04004264
4265 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
James Smart7ad20aa2011-05-24 11:44:28 -04004266 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4267 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
James Smart7a470272010-03-15 11:25:20 -04004268 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
James Smart7ad20aa2011-05-24 11:44:28 -04004269 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4270 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
James Smartc7495932010-04-06 15:05:28 -04004271 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
James Smartb6e3b9c2011-04-16 11:03:43 -04004272 rdEventLog = &pmb->un.varRdEventLog;
4273 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4274 mode = bf_get(lpfc_event_log, rdEventLog);
James Smartc7495932010-04-06 15:05:28 -04004275
4276 /* receive length cannot be greater than mailbox
4277 * extension size
4278 */
James Smart88a2cfb2011-07-22 18:36:33 -04004279 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
James Smartc7495932010-04-06 15:05:28 -04004280 rc = -ERANGE;
4281 goto job_done;
4282 }
4283
4284 /* mode zero uses a bde like biu diags command */
4285 if (mode == 0) {
James Smart7ad20aa2011-05-24 11:44:28 -04004286 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4287 + sizeof(MAILBOX_t));
4288 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4289 + sizeof(MAILBOX_t));
James Smartc7495932010-04-06 15:05:28 -04004290 }
4291 } else if (phba->sli_rev == LPFC_SLI_REV4) {
4292 if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
4293 /* rebuild the command for sli4 using our own buffers
4294 * like we do for biu diags
4295 */
James Smartb6e3b9c2011-04-16 11:03:43 -04004296 receive_length = pmb->un.varWords[2];
James Smartc7495932010-04-06 15:05:28 -04004297 /* the receive length must be non-zero
 4298 * for this command
 4299 */
James Smart7ad20aa2011-05-24 11:44:28 -04004300 if (receive_length == 0) {
James Smartc7495932010-04-06 15:05:28 -04004301 rc = -ERANGE;
4302 goto job_done;
4303 }
James Smart7ad20aa2011-05-24 11:44:28 -04004304 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4305 + sizeof(MAILBOX_t));
4306 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4307 + sizeof(MAILBOX_t));
James Smartc7495932010-04-06 15:05:28 -04004308 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4309 pmb->un.varUpdateCfg.co) {
James Smartb6e3b9c2011-04-16 11:03:43 -04004310 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
James Smartc7495932010-04-06 15:05:28 -04004311
4312 /* bde size cannot be greater than mailbox ext size */
James Smart88a2cfb2011-07-22 18:36:33 -04004313 if (bde->tus.f.bdeSize >
4314 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
James Smartc7495932010-04-06 15:05:28 -04004315 rc = -ERANGE;
4316 goto job_done;
4317 }
James Smart7ad20aa2011-05-24 11:44:28 -04004318 bde->addrHigh = putPaddrHigh(dmabuf->phys
4319 + sizeof(MAILBOX_t));
4320 bde->addrLow = putPaddrLow(dmabuf->phys
4321 + sizeof(MAILBOX_t));
James Smart515e0aa2010-09-29 11:19:00 -04004322 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
James Smart7ad20aa2011-05-24 11:44:28 -04004323 /* Handling non-embedded SLI_CONFIG mailbox command */
4324 sli4_config = &pmboxq->u.mqe.un.sli4_config;
4325 if (!bf_get(lpfc_mbox_hdr_emb,
4326 &sli4_config->header.cfg_mhdr)) {
4327 /* rebuild the command for sli4 using our
4328 * own buffers like we do for biu diags
4329 */
4330 header = (struct mbox_header *)
4331 &pmb->un.varWords[0];
4332 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4333 &pmb->un.varWords[0];
4334 receive_length = nembed_sge->sge[0].length;
James Smart515e0aa2010-09-29 11:19:00 -04004335
James Smart7ad20aa2011-05-24 11:44:28 -04004336 /* receive length cannot be greater than
4337 * mailbox extension size
4338 */
4339 if ((receive_length == 0) ||
James Smart88a2cfb2011-07-22 18:36:33 -04004340 (receive_length >
4341 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
James Smart7ad20aa2011-05-24 11:44:28 -04004342 rc = -ERANGE;
4343 goto job_done;
4344 }
4345
4346 nembed_sge->sge[0].pa_hi =
4347 putPaddrHigh(dmabuf->phys
4348 + sizeof(MAILBOX_t));
4349 nembed_sge->sge[0].pa_lo =
4350 putPaddrLow(dmabuf->phys
4351 + sizeof(MAILBOX_t));
James Smart515e0aa2010-09-29 11:19:00 -04004352 }
James Smartc7495932010-04-06 15:05:28 -04004353 }
James Smart3b5dd522010-01-26 23:10:15 -05004354 }
4355
James Smart7ad20aa2011-05-24 11:44:28 -04004356 dd_data->context_un.mbox.dmabuffers = dmabuf;
James Smartc7495932010-04-06 15:05:28 -04004357
James Smart3b5dd522010-01-26 23:10:15 -05004358 /* setup wake call as IOCB callback */
James Smart7ad20aa2011-05-24 11:44:28 -04004359 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
James Smart7a470272010-03-15 11:25:20 -04004360
James Smart3b5dd522010-01-26 23:10:15 -05004361 /* setup context field to pass wait_queue pointer to wake function */
4362 pmboxq->context1 = dd_data;
4363 dd_data->type = TYPE_MBOX;
4364 dd_data->context_un.mbox.pmboxq = pmboxq;
James Smart7ad20aa2011-05-24 11:44:28 -04004365 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
James Smart3b5dd522010-01-26 23:10:15 -05004366 dd_data->context_un.mbox.set_job = job;
James Smart7a470272010-03-15 11:25:20 -04004367 dd_data->context_un.mbox.ext = ext;
4368 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4369 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
James Smartc7495932010-04-06 15:05:28 -04004370 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
James Smart3b5dd522010-01-26 23:10:15 -05004371 job->dd_data = dd_data;
James Smart7a470272010-03-15 11:25:20 -04004372
4373 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4374 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4375 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4376 if (rc != MBX_SUCCESS) {
4377 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4378 goto job_done;
4379 }
4380
4381 /* job finished, copy the data */
James Smart7ad20aa2011-05-24 11:44:28 -04004382 memcpy(pmbx, pmb, sizeof(*pmb));
James Smart7a470272010-03-15 11:25:20 -04004383 job->reply->reply_payload_rcv_len =
4384 sg_copy_from_buffer(job->reply_payload.sg_list,
James Smart7ad20aa2011-05-24 11:44:28 -04004385 job->reply_payload.sg_cnt,
4386 pmbx, size);
James Smart7a470272010-03-15 11:25:20 -04004387 /* not waiting; mailbox already completed */
4388 rc = 0;
4389 goto job_done;
James Smart3b5dd522010-01-26 23:10:15 -05004390 }
4391
James Smart7a470272010-03-15 11:25:20 -04004392 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4393 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4394 return 1; /* job started */
4395
4396job_done:
4397 /* common exit for error or job completed inline */
James Smart7a470272010-03-15 11:25:20 -04004398 if (pmboxq)
4399 mempool_free(pmboxq, phba->mbox_mem_pool);
James Smart7ad20aa2011-05-24 11:44:28 -04004400 lpfc_bsg_dma_page_free(phba, dmabuf);
James Smart7a470272010-03-15 11:25:20 -04004401 kfree(dd_data);
4402
James Smart7ad20aa2011-05-24 11:44:28 -04004403job_cont:
James Smart7a470272010-03-15 11:25:20 -04004404 return rc;
James Smart3b5dd522010-01-26 23:10:15 -05004405}
4406
4407/**
4408 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4409 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4410 **/
4411static int
4412lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
4413{
4414 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4415 struct lpfc_hba *phba = vport->phba;
James Smart7ad20aa2011-05-24 11:44:28 -04004416 struct dfc_mbox_req *mbox_req;
James Smart3b5dd522010-01-26 23:10:15 -05004417 int rc = 0;
4418
James Smart7ad20aa2011-05-24 11:44:28 -04004419 /* mix-and-match backward compatibility */
James Smart3b5dd522010-01-26 23:10:15 -05004420 job->reply->reply_payload_rcv_len = 0;
4421 if (job->request_len <
4422 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
James Smart7ad20aa2011-05-24 11:44:28 -04004423 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4424 "2737 Mix-and-match backward compability "
4425 "between MBOX_REQ old size:%d and "
4426 "new request size:%d\n",
4427 (int)(job->request_len -
4428 sizeof(struct fc_bsg_request)),
4429 (int)sizeof(struct dfc_mbox_req));
4430 mbox_req = (struct dfc_mbox_req *)
4431 job->request->rqst_data.h_vendor.vendor_cmd;
4432 mbox_req->extMboxTag = 0;
4433 mbox_req->extSeqNum = 0;
James Smart3b5dd522010-01-26 23:10:15 -05004434 }
4435
4436 rc = lpfc_bsg_issue_mbox(phba, job, vport);
4437
James Smart3b5dd522010-01-26 23:10:15 -05004438 if (rc == 0) {
4439 /* job done */
4440 job->reply->result = 0;
4441 job->dd_data = NULL;
4442 job->job_done(job);
4443 } else if (rc == 1)
 4444 /* job submitted, will complete later */
4445 rc = 0; /* return zero, no error */
4446 else {
4447 /* some error occurred */
4448 job->reply->result = rc;
4449 job->dd_data = NULL;
4450 }
4451
4452 return rc;
4453}
4454
4455/**
James Smarte2aed292010-02-26 14:15:00 -05004456 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
4457 * @phba: Pointer to HBA context object.
4458 * @cmdiocbq: Pointer to command iocb.
4459 * @rspiocbq: Pointer to response iocb.
4460 *
 4461 * This function is the completion handler for iocbs issued using the
 4462 * lpfc_menlo_cmd function. This function is called by the
4463 * ring event handler function without any lock held. This function
4464 * can be called from both worker thread context and interrupt
4465 * context. This function also can be called from another thread which
4466 * cleans up the SLI layer objects.
 4467 * This function copies the response iocb into the iocb object saved as
 4468 * the command's context2, unmaps the DMA mappings for the request and
 4469 * reply payloads, translates the completion status into an errno, and
 4470 * completes the bsg job back to user space.
4471 **/
4472static void
4473lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4474 struct lpfc_iocbq *cmdiocbq,
4475 struct lpfc_iocbq *rspiocbq)
4476{
4477 struct bsg_job_data *dd_data;
4478 struct fc_bsg_job *job;
4479 IOCB_t *rsp;
4480 struct lpfc_dmabuf *bmp;
4481 struct lpfc_bsg_menlo *menlo;
4482 unsigned long flags;
4483 struct menlo_response *menlo_resp;
4484 int rc = 0;
4485
4486 spin_lock_irqsave(&phba->ct_ev_lock, flags);
4487 dd_data = cmdiocbq->context1;
4488 if (!dd_data) {
4489 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4490 return;
4491 }
4492
4493 menlo = &dd_data->context_un.menlo;
4494 job = menlo->set_job;
4495 job->dd_data = NULL; /* so timeout handler does not reply */
4496
James Smart5989b8d2010-10-22 11:06:56 -04004497 spin_lock(&phba->hbalock);
James Smarte2aed292010-02-26 14:15:00 -05004498 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
4499 if (cmdiocbq->context2 && rspiocbq)
4500 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
4501 &rspiocbq->iocb, sizeof(IOCB_t));
James Smart5989b8d2010-10-22 11:06:56 -04004502 spin_unlock(&phba->hbalock);
James Smarte2aed292010-02-26 14:15:00 -05004503
4504 bmp = menlo->bmp;
4505 rspiocbq = menlo->rspiocbq;
4506 rsp = &rspiocbq->iocb;
4507
4508 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
4509 job->request_payload.sg_cnt, DMA_TO_DEVICE);
4510 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
4511 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4512
 4513 /* always return the xri; it is used in the case
4514 * of a menlo download to allow the data to be sent as a continuation
4515 * of the exchange.
4516 */
4517 menlo_resp = (struct menlo_response *)
4518 job->reply->reply_data.vendor_reply.vendor_rsp;
4519 menlo_resp->xri = rsp->ulpContext;
4520 if (rsp->ulpStatus) {
4521 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4522 switch (rsp->un.ulpWord[4] & 0xff) {
4523 case IOERR_SEQUENCE_TIMEOUT:
4524 rc = -ETIMEDOUT;
4525 break;
4526 case IOERR_INVALID_RPI:
4527 rc = -EFAULT;
4528 break;
4529 default:
4530 rc = -EACCES;
4531 break;
4532 }
4533 } else
4534 rc = -EACCES;
4535 } else
4536 job->reply->reply_payload_rcv_len =
4537 rsp->un.genreq64.bdl.bdeSize;
4538
4539 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4540 lpfc_sli_release_iocbq(phba, rspiocbq);
4541 lpfc_sli_release_iocbq(phba, cmdiocbq);
4542 kfree(bmp);
4543 kfree(dd_data);
4544 /* make error code available to userspace */
4545 job->reply->result = rc;
4546 /* complete the job back to userspace */
4547 job->job_done(job);
4548 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4549 return;
4550}
4551
4552/**
4553 * lpfc_menlo_cmd - send an ioctl for menlo hardware
4554 * @job: fc_bsg_job to handle
4555 *
4556 * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
4557 * all of the command completions will return the xri for the command.
4558 * For menlo data requests a gen request 64 CX is used to continue the exchange
4559 * supplied in the menlo request header xri field.
4560 **/
4561static int
4562lpfc_menlo_cmd(struct fc_bsg_job *job)
4563{
4564 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4565 struct lpfc_hba *phba = vport->phba;
4566 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
4567 IOCB_t *cmd, *rsp;
4568 int rc = 0;
4569 struct menlo_command *menlo_cmd;
4570 struct menlo_response *menlo_resp;
4571 struct lpfc_dmabuf *bmp = NULL;
4572 int request_nseg;
4573 int reply_nseg;
4574 struct scatterlist *sgel = NULL;
4575 int numbde;
4576 dma_addr_t busaddr;
4577 struct bsg_job_data *dd_data;
4578 struct ulp_bde64 *bpl = NULL;
4579
4580	/* in case no data is returned, return just the return code */
4581 job->reply->reply_payload_rcv_len = 0;
4582
4583 if (job->request_len <
4584 sizeof(struct fc_bsg_request) +
4585 sizeof(struct menlo_command)) {
4586 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4587 "2784 Received MENLO_CMD request below "
4588 "minimum size\n");
4589 rc = -ERANGE;
4590 goto no_dd_data;
4591 }
4592
4593 if (job->reply_len <
4594 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
4595 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4596 "2785 Received MENLO_CMD reply below "
4597 "minimum size\n");
4598 rc = -ERANGE;
4599 goto no_dd_data;
4600 }
4601
4602 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
4603 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4604 "2786 Adapter does not support menlo "
4605 "commands\n");
4606 rc = -EPERM;
4607 goto no_dd_data;
4608 }
4609
4610 menlo_cmd = (struct menlo_command *)
4611 job->request->rqst_data.h_vendor.vendor_cmd;
4612
4613 menlo_resp = (struct menlo_response *)
4614 job->reply->reply_data.vendor_reply.vendor_rsp;
4615
4616 /* allocate our bsg tracking structure */
4617 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4618 if (!dd_data) {
4619 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4620 "2787 Failed allocation of dd_data\n");
4621 rc = -ENOMEM;
4622 goto no_dd_data;
4623 }
4624
4625 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4626 if (!bmp) {
4627 rc = -ENOMEM;
4628 goto free_dd;
4629 }
4630
4631 cmdiocbq = lpfc_sli_get_iocbq(phba);
4632 if (!cmdiocbq) {
4633 rc = -ENOMEM;
4634 goto free_bmp;
4635 }
4636
4637 rspiocbq = lpfc_sli_get_iocbq(phba);
4638 if (!rspiocbq) {
4639 rc = -ENOMEM;
4640 goto free_cmdiocbq;
4641 }
4642
4643 rsp = &rspiocbq->iocb;
4644
4645 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
4646 if (!bmp->virt) {
4647 rc = -ENOMEM;
4648 goto free_rspiocbq;
4649 }
4650
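	/*
	 * Build the buffer pointer list in the newly allocated DMA buffer:
	 * BDEs for the request payload first, then BDEs for the reply
	 * payload; the iocb's BDL will point at this single list.
	 */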
4651 INIT_LIST_HEAD(&bmp->list);
4652 bpl = (struct ulp_bde64 *) bmp->virt;
4653 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
4654 job->request_payload.sg_cnt, DMA_TO_DEVICE);
4655 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
4656 busaddr = sg_dma_address(sgel);
4657 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
4658 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4659 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4660 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4661 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4662 bpl++;
4663 }
4664
4665 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
4666 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4667 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
4668 busaddr = sg_dma_address(sgel);
4669 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
4670 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4671 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4672 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4673 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4674 bpl++;
4675 }
4676
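	/* set up the GEN_REQUEST64 iocb; its BDL covers both payload BDE lists */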
4677 cmd = &cmdiocbq->iocb;
4678 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
4679 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
4680 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
4681 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
4682 cmd->un.genreq64.bdl.bdeSize =
4683 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
4684 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
4685 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
4686 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
4687 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
4688 cmd->ulpBdeCount = 1;
4689 cmd->ulpClass = CLASS3;
4690 cmd->ulpOwner = OWN_CHIP;
4691	cmd->ulpLe = 1; /* LE: last entry */
4692 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
4693 cmdiocbq->vport = phba->pport;
4694 /* We want the firmware to timeout before we do */
4695 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
4696 cmdiocbq->context3 = bmp;
4697 cmdiocbq->context2 = rspiocbq;
4698 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
4699 cmdiocbq->context1 = dd_data;
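	/*
	 * A MENLO_CMD request opens a new exchange with GEN_REQUEST64_CR to
	 * MENLO_DID; a MENLO_DATA request continues the exchange named by the
	 * xri supplied by the application, using GEN_REQUEST64_CX.
	 */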
4701 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
4702 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
4703 cmd->ulpPU = MENLO_PU; /* 3 */
4704 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
4705 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
4706 } else {
4707 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
4708 cmd->ulpPU = 1;
4709 cmd->un.ulpWord[4] = 0;
4710 cmd->ulpContext = menlo_cmd->xri;
4711 }
4712
4713 dd_data->type = TYPE_MENLO;
4714 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
4715 dd_data->context_un.menlo.rspiocbq = rspiocbq;
4716 dd_data->context_un.menlo.set_job = job;
4717 dd_data->context_un.menlo.bmp = bmp;
4718
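	/* issue the iocb on the ELS ring; on success the completion handler
	 * finishes the job asynchronously
	 */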
4719 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
4720 MENLO_TIMEOUT - 5);
4721 if (rc == IOCB_SUCCESS)
4722 return 0; /* done for now */
4723
4724 /* iocb failed so cleanup */
4725 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
4726 job->request_payload.sg_cnt, DMA_TO_DEVICE);
4727 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
4728 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4729
4730 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4731
4732free_rspiocbq:
4733 lpfc_sli_release_iocbq(phba, rspiocbq);
4734free_cmdiocbq:
4735 lpfc_sli_release_iocbq(phba, cmdiocbq);
4736free_bmp:
4737 kfree(bmp);
4738free_dd:
4739 kfree(dd_data);
4740no_dd_data:
4741 /* make error code available to userspace */
4742 job->reply->result = rc;
4743 job->dd_data = NULL;
4744 return rc;
4745}
4746
4747/**
4748 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
4749 * @job: fc_bsg_job to handle
4750 **/
4751static int
4752lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
4753{
4754	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
4755	int rc;
4756
4757 switch (command) {
4758 case LPFC_BSG_VENDOR_SET_CT_EVENT:
4759		rc = lpfc_bsg_hba_set_event(job);
4760		break;
4761	case LPFC_BSG_VENDOR_GET_CT_EVENT:
4762		rc = lpfc_bsg_hba_get_event(job);
4763		break;
4764	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
4765 rc = lpfc_bsg_send_mgmt_rsp(job);
4766 break;
4767 case LPFC_BSG_VENDOR_DIAG_MODE:
4768		rc = lpfc_bsg_diag_loopback_mode(job);
4769		break;
4770	case LPFC_BSG_VENDOR_DIAG_MODE_END:
4771 rc = lpfc_sli4_bsg_diag_mode_end(job);
4772 break;
4773 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
4774 rc = lpfc_bsg_diag_loopback_run(job);
4775 break;
4776 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
4777 rc = lpfc_sli4_bsg_link_diag_test(job);
4778		break;
4779 case LPFC_BSG_VENDOR_GET_MGMT_REV:
4780 rc = lpfc_bsg_get_dfc_rev(job);
4781 break;
4782 case LPFC_BSG_VENDOR_MBOX:
4783 rc = lpfc_bsg_mbox_cmd(job);
4784 break;
4785	case LPFC_BSG_VENDOR_MENLO_CMD:
4786 case LPFC_BSG_VENDOR_MENLO_DATA:
4787 rc = lpfc_menlo_cmd(job);
4788 break;
4789	default:
4790		rc = -EINVAL;
4791 job->reply->reply_payload_rcv_len = 0;
4792 /* make error code available to userspace */
4793 job->reply->result = rc;
4794 break;
4795	}
4796
4797	return rc;
4798}
4799
4800/**
4801 * lpfc_bsg_request - handle a bsg request from the FC transport
4802 * @job: fc_bsg_job to handle
4803 **/
4804int
4805lpfc_bsg_request(struct fc_bsg_job *job)
4806{
4807 uint32_t msgcode;
4808	int rc;
4809
4810 msgcode = job->request->msgcode;
4811	switch (msgcode) {
4812 case FC_BSG_HST_VENDOR:
4813 rc = lpfc_bsg_hst_vendor(job);
4814 break;
4815 case FC_BSG_RPT_ELS:
4816 rc = lpfc_bsg_rport_els(job);
4817 break;
4818 case FC_BSG_RPT_CT:
4819		rc = lpfc_bsg_send_mgmt_cmd(job);
4820		break;
4821 default:
4822		rc = -EINVAL;
4823 job->reply->reply_payload_rcv_len = 0;
4824 /* make error code available to userspace */
4825 job->reply->result = rc;
4826		break;
4827 }
4828
4829 return rc;
4830}
4831
4832/**
4833 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
4834 * @job: fc_bsg_job that has timed out
4835 *
4836 * This function just aborts the job's IOCB. The aborted IOCB will return to
4837 * the completion handler, which will pass the error back to userspace.
4838 **/
4839int
4840lpfc_bsg_timeout(struct fc_bsg_job *job)
4841{
4842 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4843 struct lpfc_hba *phba = vport->phba;
James Smart4cc0e562010-01-26 23:09:48 -05004844 struct lpfc_iocbq *cmdiocb;
4845 struct lpfc_bsg_event *evt;
4846 struct lpfc_bsg_iocb *iocb;
4847	struct lpfc_bsg_mbox *mbox;
4848	struct lpfc_bsg_menlo *menlo;
4849	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4850	struct bsg_job_data *dd_data;
4851 unsigned long flags;
4852
4853	spin_lock_irqsave(&phba->ct_ev_lock, flags);
4854 dd_data = (struct bsg_job_data *)job->dd_data;
4855 /* timeout and completion crossed paths if no dd_data */
4856 if (!dd_data) {
4857 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4858 return 0;
4859 }
4860
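	/*
	 * Abort or detach from the outstanding request according to the type
	 * of operation this job is tracking.
	 */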
4861 switch (dd_data->type) {
4862 case TYPE_IOCB:
4863 iocb = &dd_data->context_un.iocb;
4864 cmdiocb = iocb->cmdiocbq;
4865 /* hint to completion handler that the job timed out */
4866 job->reply->result = -EAGAIN;
4867 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4868 /* this will call our completion handler */
4869 spin_lock_irq(&phba->hbalock);
4870		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
4871		spin_unlock_irq(&phba->hbalock);
4872 break;
4873 case TYPE_EVT:
4874 evt = dd_data->context_un.evt;
4875 /* this event has no job anymore */
4876 evt->set_job = NULL;
4877 job->dd_data = NULL;
4878 job->reply->reply_payload_rcv_len = 0;
4879		/* Return -EAGAIN which is our way of signalling the
4880 * app to retry.
4881 */
4882 job->reply->result = -EAGAIN;
4883 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4884 job->job_done(job);
4885 break;
4886	case TYPE_MBOX:
4887 mbox = &dd_data->context_un.mbox;
4888 /* this mbox has no job anymore */
4889 mbox->set_job = NULL;
4890 job->dd_data = NULL;
4891 job->reply->reply_payload_rcv_len = 0;
4892 job->reply->result = -EAGAIN;
4893		/* the mbox completion handler can now be run */
4894		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4895		job->job_done(job);
4896		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4897 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4898		break;
4899	case TYPE_MENLO:
4900 menlo = &dd_data->context_un.menlo;
4901 cmdiocb = menlo->cmdiocbq;
4902 /* hint to completion handler that the job timed out */
4903 job->reply->result = -EAGAIN;
4904 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4905 /* this will call our completion handler */
4906 spin_lock_irq(&phba->hbalock);
4907 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
4908 spin_unlock_irq(&phba->hbalock);
4909 break;
4910	default:
4911 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4912 break;
4913 }
4914
4915	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
4916 * otherwise an error message will be displayed on the console
4917 * so always return success (zero)
4918 */
4919	return 0;
4920}