/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/**
 *  bfad_im.c Linux driver IM module.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_trcmod.h"
#include "bfa_cb_ioim_macros.h"
#include <fcb/bfa_fcb_fcpim.h>

BFA_TRC_FILE(LDRV, IM);

DEFINE_IDR(bfad_im_port_index);
struct scsi_transport_template *bfad_im_scsi_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
                void (*done)(struct scsi_cmnd *));
static int bfad_im_slave_alloc(struct scsi_device *sdev);

void
bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
                enum bfi_ioim_status io_status, u8 scsi_status,
                int sns_len, u8 *sns_info, s32 residue)
{
        struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
        struct bfad_s *bfad = drv;
        struct bfad_itnim_data_s *itnim_data;
        struct bfad_itnim_s *itnim;
        u8 host_status = DID_OK;

        switch (io_status) {
        case BFI_IOIM_STS_OK:
                bfa_trc(bfad, scsi_status);
                scsi_set_resid(cmnd, 0);

                if (sns_len > 0) {
                        bfa_trc(bfad, sns_len);
                        if (sns_len > SCSI_SENSE_BUFFERSIZE)
                                sns_len = SCSI_SENSE_BUFFERSIZE;
                        memcpy(cmnd->sense_buffer, sns_info, sns_len);
                }
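                /*
                 * Data underrun: record the residue and, if the command
                 * otherwise completed cleanly but moved less data than
                 * cmnd->underflow requires, report it as a host error.
                 */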
                if (residue > 0) {
                        bfa_trc(bfad, residue);
                        scsi_set_resid(cmnd, residue);
                        if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
                            (scsi_bufflen(cmnd) - residue) < cmnd->underflow) {
                                bfa_trc(bfad, 0);
                                host_status = DID_ERROR;
                        }
                }
                cmnd->result = ScsiResult(host_status, scsi_status);

                break;

        case BFI_IOIM_STS_ABORTED:
        case BFI_IOIM_STS_TIMEDOUT:
        case BFI_IOIM_STS_PATHTOV:
        default:
                cmnd->result = ScsiResult(DID_ERROR, 0);
        }

        /* Unmap DMA; if host is NULL, this is a SCSI passthrough command */
        if (cmnd->device->host != NULL)
                scsi_dma_unmap(cmnd);

        cmnd->host_scribble = NULL;
        bfa_trc(bfad, cmnd->result);

        itnim_data = cmnd->device->hostdata;
        if (itnim_data) {
                itnim = itnim_data->itnim;
                if (!cmnd->result && itnim &&
                    (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
                        /* Queue depth adjustment for good status completion */
                        bfad_os_ramp_up_qdepth(itnim, cmnd->device);
                } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
                        /* qfull handling */
                        bfad_os_handle_qfull(itnim, cmnd->device);
                }
        }

        cmnd->scsi_done(cmnd);
}

void
bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
{
        struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
        struct bfad_itnim_data_s *itnim_data;
        struct bfad_itnim_s *itnim;

        cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);

        /* Unmap DMA; if host is NULL, this is a SCSI passthrough command */
        if (cmnd->device->host != NULL)
                scsi_dma_unmap(cmnd);

        cmnd->host_scribble = NULL;

        /* Queue depth adjustment */
        if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
                itnim_data = cmnd->device->hostdata;
                if (itnim_data) {
                        itnim = itnim_data->itnim;
                        if (itnim)
                                bfad_os_ramp_up_qdepth(itnim, cmnd->device);
                }
        }

        cmnd->scsi_done(cmnd);
}

void
bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
{
        struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
        struct bfad_s *bfad = drv;

        cmnd->result = ScsiResult(DID_ERROR, 0);

        /* Unmap DMA; if host is NULL, this is a SCSI passthrough command */
        if (cmnd->device->host != NULL)
                scsi_dma_unmap(cmnd);

        bfa_trc(bfad, cmnd->result);
        cmnd->host_scribble = NULL;
}

void
bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
                enum bfi_tskim_status tsk_status)
{
        struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
        wait_queue_head_t *wq;

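        /*
         * Pack the task management status into SCp.Status: bit 0 is the
         * IO_DONE flag, the remaining bits carry the bfi_tskim_status
         * value (the reset handlers decode it with a >> 1).
         */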
        cmnd->SCp.Status |= tsk_status << 1;
        set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
        wq = (wait_queue_head_t *) cmnd->SCp.ptr;
        cmnd->SCp.ptr = NULL;

        if (wq)
                wake_up(wq);
}

void
bfa_cb_ioim_resfree(void *drv)
{
}

/**
 *  Scsi_Host template entry points
 */

/**
 * Scsi_Host template entry, returns BFAD PCI info.
 */
static const char *
bfad_im_info(struct Scsi_Host *shost)
{
        static char bfa_buf[256];
        struct bfad_im_port_s *im_port =
                        (struct bfad_im_port_s *) shost->hostdata[0];
        struct bfad_s *bfad = im_port->bfad;
        char model[BFA_ADAPTER_MODEL_NAME_LEN];

        bfa_get_adapter_model(&bfad->bfa, model);

        memset(bfa_buf, 0, sizeof(bfa_buf));
        snprintf(bfa_buf, sizeof(bfa_buf),
                "Brocade FC/FCOE Adapter, model: %s hwpath: %s driver: %s",
                model, bfad->pci_name, BFAD_DRIVER_VERSION);
        return bfa_buf;
}

/**
 * Scsi_Host template entry, aborts the specified SCSI command.
 *
 * Returns: SUCCESS or FAILED.
 */
static int
bfad_im_abort_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct bfad_im_port_s *im_port =
                        (struct bfad_im_port_s *) shost->hostdata[0];
        struct bfad_s *bfad = im_port->bfad;
        struct bfa_ioim_s *hal_io;
        unsigned long flags;
        u32 timeout;
        int rc = FAILED;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
        if (!hal_io) {
                /* IO has already completed; return success */
                rc = SUCCESS;
                goto out;
        }
        if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {
                rc = FAILED;
                goto out;
        }

        bfa_trc(bfad, hal_io->iotag);
        bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT,
                im_port->shost->host_no, cmnd, hal_io->iotag);
        bfa_ioim_abort(hal_io);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        /* Wait until the command gets aborted, doubling the sleep up to 4*HZ */
        timeout = 10;
        while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(timeout);
                if (timeout < 4 * HZ)
                        timeout *= 2;
        }

        cmnd->scsi_done(cmnd);
        bfa_trc(bfad, hal_io->iotag);
        bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT_COMP,
                im_port->shost->host_no, cmnd, hal_io->iotag);
        return SUCCESS;
out:
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        return rc;
}

static bfa_status_t
bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
                struct bfad_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_itnim_s *bfa_itnim;
        bfa_status_t rc = BFA_STATUS_OK;

        bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
        tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
        if (!tskim) {
                BFA_DEV_PRINTF(bfad, BFA_ERR,
                        "target reset, fail to allocate tskim\n");
                rc = BFA_STATUS_FAILED;
                goto out;
        }

        /*
         * Set host_scribble to NULL so the abort handler does not try to
         * abort this task management command.
         */
        cmnd->host_scribble = NULL;
        cmnd->SCp.Status = 0;
        bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
        bfa_tskim_start(tskim, bfa_itnim, (lun_t)0,
                        FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
out:
        return rc;
}

/**
 * Scsi_Host template entry, resets a LUN and aborts all of its commands.
 *
 * Returns: SUCCESS or FAILED.
 */
static int
bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct bfad_im_port_s *im_port =
                        (struct bfad_im_port_s *) shost->hostdata[0];
        struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
        struct bfad_s *bfad = im_port->bfad;
        struct bfa_tskim_s *tskim;
        struct bfad_itnim_s *itnim;
        struct bfa_itnim_s *bfa_itnim;
        DECLARE_WAIT_QUEUE_HEAD(wq);
        int rc = SUCCESS;
        unsigned long flags;
        enum bfi_tskim_status task_status;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        itnim = itnim_data->itnim;
        if (!itnim) {
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                rc = FAILED;
                goto out;
        }

        tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
        if (!tskim) {
                BFA_DEV_PRINTF(bfad, BFA_ERR,
                        "LUN reset, fail to allocate tskim");
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                rc = FAILED;
                goto out;
        }

        /*
         * Set host_scribble to NULL so the abort handler does not try to
         * abort this task management command.
         */
        cmnd->host_scribble = NULL;
        cmnd->SCp.ptr = (char *)&wq;
        cmnd->SCp.Status = 0;
        bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
        bfa_tskim_start(tskim, bfa_itnim,
                        bfad_int_to_lun(cmnd->device->lun),
                        FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        wait_event(wq, test_bit(IO_DONE_BIT,
                        (unsigned long *)&cmnd->SCp.Status));

        task_status = cmnd->SCp.Status >> 1;
        if (task_status != BFI_TSKIM_STS_OK) {
                BFA_DEV_PRINTF(bfad, BFA_ERR, "LUN reset failure, status: %d\n",
                        task_status);
                rc = FAILED;
        }

out:
        return rc;
}

/**
 * Scsi_Host template entry, resets the bus and aborts all commands.
 */
static int
bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct bfad_im_port_s *im_port =
                        (struct bfad_im_port_s *) shost->hostdata[0];
        struct bfad_s *bfad = im_port->bfad;
        struct bfad_itnim_s *itnim;
        unsigned long flags;
        u32 i, rc, err_cnt = 0;
        DECLARE_WAIT_QUEUE_HEAD(wq);
        enum bfi_tskim_status task_status;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        for (i = 0; i < MAX_FCP_TARGET; i++) {
                itnim = bfad_os_get_itnim(im_port, i);
                if (itnim) {
                        cmnd->SCp.ptr = (char *)&wq;
                        rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
                        if (rc != BFA_STATUS_OK) {
                                err_cnt++;
                                continue;
                        }

                        /* wait for the target reset to complete */
                        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                        wait_event(wq, test_bit(IO_DONE_BIT,
                                        (unsigned long *)&cmnd->SCp.Status));
                        spin_lock_irqsave(&bfad->bfad_lock, flags);

                        task_status = cmnd->SCp.Status >> 1;
                        if (task_status != BFI_TSKIM_STS_OK) {
                                BFA_DEV_PRINTF(bfad, BFA_ERR,
                                        "target reset failure,"
                                        " status: %d\n", task_status);
                                err_cnt++;
                        }
                }
        }
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        if (err_cnt)
                return FAILED;

        return SUCCESS;
}

/**
 * Scsi_Host template entry slave_destroy.
 */
static void
bfad_im_slave_destroy(struct scsi_device *sdev)
{
        sdev->hostdata = NULL;
        return;
}

/**
 *  BFA FCS itnim callbacks
 */

/**
 * BFA FCS itnim alloc callback, after successful PRLI
 * Context: Interrupt
 */
void
bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
                struct bfad_itnim_s **itnim_drv)
{
        *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
        if (*itnim_drv == NULL)
                return;

        (*itnim_drv)->im = bfad->im;
        *itnim = &(*itnim_drv)->fcs_itnim;
        (*itnim_drv)->state = ITNIM_STATE_NONE;

        /*
         * Initialize the itnim_work
         */
        INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
        bfad->bfad_flags |= BFAD_RPORT_ONLINE;
}

/**
 * BFA FCS itnim free callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
{
        struct bfad_port_s *port;
        wwn_t wwpn;
        u32 fcid;
        char wwpn_str[32], fcid_str[16];

        /* online to free state transition should not happen */
        bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);

        itnim_drv->queue_work = 1;
        /* offline request is not yet done, use the same request to free */
        if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING)
                itnim_drv->queue_work = 0;

        itnim_drv->state = ITNIM_STATE_FREE;
        port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
        itnim_drv->im_port = port->im_port;
        wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim);
        fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
        wwn2str(wwpn_str, wwpn);
        fcid2str(fcid_str, fcid);
        bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_FREE,
                port->im_port->shost->host_no,
                fcid_str, wwpn_str);
        bfad_os_itnim_process(itnim_drv);
}

/**
 * BFA FCS itnim online callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
{
        struct bfad_port_s *port;

        itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
        port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
        itnim_drv->state = ITNIM_STATE_ONLINE;
        itnim_drv->queue_work = 1;
        itnim_drv->im_port = port->im_port;
        bfad_os_itnim_process(itnim_drv);
}

/**
 * BFA FCS itnim offline callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
{
        struct bfad_port_s *port;
        struct bfad_s *bfad;

        port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
        bfad = port->bfad;
        if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
            (port->flags & BFAD_PORT_DELETE)) {
                itnim_drv->state = ITNIM_STATE_OFFLINE;
                return;
        }
        itnim_drv->im_port = port->im_port;
        itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
        itnim_drv->queue_work = 1;
        bfad_os_itnim_process(itnim_drv);
}

/**
 * BFA FCS itnim timeout callback.
 * Context: Interrupt. bfad_lock is held
 */
void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
{
        itnim->state = ITNIM_STATE_TIMEOUT;
}

/**
 * Path TOV processing begin notification -- dummy for linux
 */
void
bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim)
{
}

/**
 * Allocate a Scsi_Host for a port.
 */
int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
        int error = 1;

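        /*
         * Allocate an index for this port from the global IDR; it becomes
         * the SCSI host's unique_id below.
         */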
        if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
                printk(KERN_WARNING "idr_pre_get failure\n");
                goto out;
        }

        error = idr_get_new(&bfad_im_port_index, im_port,
                &im_port->idr_id);
        if (error) {
                printk(KERN_WARNING "idr_get_new failure\n");
                goto out;
        }

        im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
        if (!im_port->shost) {
                error = 1;
                goto out_free_idr;
        }

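        /*
         * Stash the im_port pointer in hostdata[0] so the template entry
         * points (info, queuecommand and the eh_* handlers) can recover
         * it from the Scsi_Host.
         */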
        im_port->shost->hostdata[0] = (unsigned long)im_port;
        im_port->shost->unique_id = im_port->idr_id;
        im_port->shost->this_id = -1;
        im_port->shost->max_id = MAX_FCP_TARGET;
        im_port->shost->max_lun = MAX_FCP_LUN;
        im_port->shost->max_cmd_len = 16;
        im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
        im_port->shost->transportt = bfad_im_scsi_transport_template;

        error = bfad_os_scsi_add_host(im_port->shost, im_port, bfad);
        if (error) {
                printk(KERN_WARNING "bfad_os_scsi_add_host failure %d\n",
                        error);
                goto out_fc_rel;
        }

        /* set up fixed FC host attributes if the kernel supports them */
        bfad_os_fc_host_init(im_port);

        return 0;

out_fc_rel:
        scsi_host_put(im_port->shost);
out_free_idr:
        idr_remove(&bfad_im_port_index, im_port->idr_id);
out:
        return error;
}

void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
        unsigned long flags;

        bfa_trc(bfad, bfad->inst_no);
        bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE,
                im_port->shost->host_no);

        fc_remove_host(im_port->shost);

        scsi_remove_host(im_port->shost);
        scsi_host_put(im_port->shost);

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        idr_remove(&bfad_im_port_index, im_port->idr_id);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

static void
bfad_im_port_delete_handler(struct work_struct *work)
{
        struct bfad_im_port_s *im_port =
                container_of(work, struct bfad_im_port_s, port_delete_work);

        bfad_im_scsi_host_free(im_port->bfad, im_port);
        bfad_im_port_clean(im_port);
        kfree(im_port);
}

bfa_status_t
bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
{
        int rc = BFA_STATUS_OK;
        struct bfad_im_port_s *im_port;

        im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
        if (im_port == NULL) {
                rc = BFA_STATUS_ENOMEM;
                goto ext;
        }
        port->im_port = im_port;
        im_port->port = port;
        im_port->bfad = bfad;

        INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
        INIT_LIST_HEAD(&im_port->itnim_mapped_list);
        INIT_LIST_HEAD(&im_port->binding_list);

ext:
        return rc;
}

void
bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
{
        struct bfad_im_port_s *im_port = port->im_port;

        queue_work(bfad->im->drv_workq,
                &im_port->port_delete_work);
}

void
bfad_im_port_clean(struct bfad_im_port_s *im_port)
{
        struct bfad_fcp_binding *bp, *bp_new;
        unsigned long flags;
        struct bfad_s *bfad = im_port->bfad;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
                        list_entry) {
                list_del(&bp->list_entry);
                kfree(bp);
        }

        /* the itnim_mapped_list must be empty at this time */
        bfa_assert(list_empty(&im_port->itnim_mapped_list));

        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

void
bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port)
{
}

void
bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port)
{
}

bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
        struct bfad_im_s *im;
        bfa_status_t rc = BFA_STATUS_OK;

        im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
        if (im == NULL) {
                rc = BFA_STATUS_ENOMEM;
                goto ext;
        }

        bfad->im = im;
        im->bfad = bfad;

        if (bfad_os_thread_workq(bfad) != BFA_STATUS_OK) {
                kfree(im);
                rc = BFA_STATUS_FAILED;
        }

ext:
        return rc;
}

void
bfad_im_probe_undo(struct bfad_s *bfad)
{
        if (bfad->im) {
                bfad_os_destroy_workq(bfad->im);
                kfree(bfad->im);
                bfad->im = NULL;
        }
}

int
bfad_os_scsi_add_host(struct Scsi_Host *shost, struct bfad_im_port_s *im_port,
                struct bfad_s *bfad)
{
        struct device *dev;

        if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
                dev = &bfad->pcidev->dev;
        else
                dev = &bfad->pport.im_port->shost->shost_gendev;

        return scsi_add_host(shost, dev);
}

struct Scsi_Host *
bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
        struct scsi_host_template *sht;

        if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
                sht = &bfad_im_scsi_host_template;
        else
                sht = &bfad_im_vport_template;

        sht->sg_tablesize = bfad->cfg_data.io_max_sge;

        return scsi_host_alloc(sht, sizeof(unsigned long));
}

void
bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
        flush_workqueue(bfad->im->drv_workq);
        bfad_im_scsi_host_free(im_port->bfad, im_port);
        bfad_im_port_clean(im_port);
        kfree(im_port);
}

void
bfad_os_destroy_workq(struct bfad_im_s *im)
{
        if (im && im->drv_workq) {
                destroy_workqueue(im->drv_workq);
                im->drv_workq = NULL;
        }
}

bfa_status_t
bfad_os_thread_workq(struct bfad_s *bfad)
{
        struct bfad_im_s *im = bfad->im;

        bfa_trc(bfad, 0);
        snprintf(im->drv_workq_name, BFAD_KOBJ_NAME_LEN, "bfad_wq_%d",
                bfad->inst_no);
        im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
        if (!im->drv_workq)
                return BFA_STATUS_FAILED;

        return BFA_STATUS_OK;
}

/**
 * Scsi_Host template entry.
 *
 * Description:
 * OS entry point to adjust the queue_depths on a per-device basis.
 * Called once per device during the bus scan.
 * Returns non-zero on failure.
 */
static int
bfad_im_slave_configure(struct scsi_device *sdev)
{
        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, bfa_lun_queue_depth);
        else
                scsi_deactivate_tcq(sdev, bfa_lun_queue_depth);

        return 0;
}

struct scsi_host_template bfad_im_scsi_host_template = {
        .module = THIS_MODULE,
        .name = BFAD_DRIVER_NAME,
        .info = bfad_im_info,
        .queuecommand = bfad_im_queuecommand,
        .eh_abort_handler = bfad_im_abort_handler,
        .eh_device_reset_handler = bfad_im_reset_lun_handler,
        .eh_bus_reset_handler = bfad_im_reset_bus_handler,

        .slave_alloc = bfad_im_slave_alloc,
        .slave_configure = bfad_im_slave_configure,
        .slave_destroy = bfad_im_slave_destroy,

        .this_id = -1,
        .sg_tablesize = BFAD_IO_MAX_SGE,
        .cmd_per_lun = 3,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = bfad_im_host_attrs,
        .max_sectors = 0xFFFF,
};

struct scsi_host_template bfad_im_vport_template = {
        .module = THIS_MODULE,
        .name = BFAD_DRIVER_NAME,
        .info = bfad_im_info,
        .queuecommand = bfad_im_queuecommand,
        .eh_abort_handler = bfad_im_abort_handler,
        .eh_device_reset_handler = bfad_im_reset_lun_handler,
        .eh_bus_reset_handler = bfad_im_reset_bus_handler,

        .slave_alloc = bfad_im_slave_alloc,
        .slave_configure = bfad_im_slave_configure,
        .slave_destroy = bfad_im_slave_destroy,

        .this_id = -1,
        .sg_tablesize = BFAD_IO_MAX_SGE,
        .cmd_per_lun = 3,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = bfad_im_vport_attrs,
        .max_sectors = 0xFFFF,
};

void
bfad_im_probe_post(struct bfad_im_s *im)
{
        flush_workqueue(im->drv_workq);
}

bfa_status_t
bfad_im_module_init(void)
{
        bfad_im_scsi_transport_template =
                fc_attach_transport(&bfad_im_fc_function_template);
        if (!bfad_im_scsi_transport_template)
                return BFA_STATUS_ENOMEM;

        return BFA_STATUS_OK;
}

void
bfad_im_module_exit(void)
{
        if (bfad_im_scsi_transport_template)
                fc_release_transport(bfad_im_scsi_transport_template);
}

void
bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv)
{
        struct bfad_im_s *im = itnim_drv->im;

        if (itnim_drv->queue_work)
                queue_work(im->drv_workq, &itnim_drv->itnim_work);
}

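/**
 * Queue depth ramp-up: on a good completion, and only if neither a ramp-up
 * nor a queue-full event has been seen on this itnim within the last
 * BFA_QUEUE_FULL_RAMP_UP_TIME seconds, bump the queue depth of every LUN
 * on this target by one (never beyond bfa_lun_queue_depth).
 */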
void
bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
        struct scsi_device *tmp_sdev;

        if (((jiffies - itnim->last_ramp_up_time) >
                BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
            ((jiffies - itnim->last_queue_full_time) >
                BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
                shost_for_each_device(tmp_sdev, sdev->host) {
                        if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
                                if (tmp_sdev->id != sdev->id)
                                        continue;
                                if (tmp_sdev->ordered_tags)
                                        scsi_adjust_queue_depth(tmp_sdev,
                                                MSG_ORDERED_TAG,
                                                tmp_sdev->queue_depth + 1);
                                else
                                        scsi_adjust_queue_depth(tmp_sdev,
                                                MSG_SIMPLE_TAG,
                                                tmp_sdev->queue_depth + 1);

                                itnim->last_ramp_up_time = jiffies;
                        }
                }
        }
}

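/**
 * Queue-full handling: record when the last queue-full was seen and let
 * the SCSI midlayer track and trim the queue depth of every LUN on the
 * affected target.
 */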
void
bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
        struct scsi_device *tmp_sdev;

        itnim->last_queue_full_time = jiffies;

        shost_for_each_device(tmp_sdev, sdev->host) {
                if (tmp_sdev->id != sdev->id)
                        continue;
                scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
        }
}

struct bfad_itnim_s *
bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
{
        struct bfad_itnim_s *itnim = NULL;

        /* Search the mapped list for this target ID */
        list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
                if (id == itnim->scsi_tgt_id)
                        return itnim;
        }

        return NULL;
}

/**
 * Scsi_Host template entry slave_alloc
 */
static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;

        sdev->hostdata = rport->dd_data;

        return 0;
}

void
bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
{
        struct Scsi_Host *host = im_port->shost;
        struct bfad_s *bfad = im_port->bfad;
        struct bfad_port_s *port = im_port->port;
        struct bfa_pport_attr_s pattr;
        char model[BFA_ADAPTER_MODEL_NAME_LEN];
        char fw_ver[BFA_VERSION_LEN];

        fc_host_node_name(host) =
                bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
        fc_host_port_name(host) =
                bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port)));

        fc_host_supported_classes(host) = FC_COS_CLASS3;

        memset(fc_host_supported_fc4s(host), 0,
                sizeof(fc_host_supported_fc4s(host)));
        if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
                /* For FCP type 0x08 */
                fc_host_supported_fc4s(host)[2] = 1;
        if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
                /* For LLC/SNAP type 0x05 */
                fc_host_supported_fc4s(host)[3] = 0x20;
        /* For fibre channel services type 0x20 */
        fc_host_supported_fc4s(host)[7] = 1;

        bfa_get_adapter_model(&bfad->bfa, model);
        bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
        sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s",
                model, fw_ver, BFAD_DRIVER_VERSION);

        fc_host_supported_speeds(host) = 0;
        fc_host_supported_speeds(host) |=
                FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
                FC_PORTSPEED_1GBIT;

        bfa_fcport_get_attr(&bfad->bfa, &pattr);
        fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
}

static void
bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
{
        struct fc_rport_identifiers rport_ids;
        struct fc_rport *fc_rport;
        struct bfad_itnim_data_s *itnim_data;

        rport_ids.node_name =
                bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
        rport_ids.port_name =
                bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
        rport_ids.port_id =
                bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

        itnim->fc_rport = fc_rport =
                fc_remote_port_add(im_port->shost, 0, &rport_ids);

        if (!fc_rport)
                return;

        fc_rport->maxframe_size =
                bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
        fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);

        itnim_data = fc_rport->dd_data;
        itnim_data->itnim = itnim;

        rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

        if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
                fc_remote_port_rolechg(fc_rport, rport_ids.roles);

        if ((fc_rport->scsi_target_id != -1)
            && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
                itnim->scsi_tgt_id = fc_rport->scsi_target_id;

        return;
}

/**
 * Work queue handler using FC transport service
 * Context: kernel
 */
static void
bfad_im_itnim_work_handler(struct work_struct *work)
{
        struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s,
                                                  itnim_work);
        struct bfad_im_s *im = itnim->im;
        struct bfad_s *bfad = im->bfad;
        struct bfad_im_port_s *im_port;
        unsigned long flags;
        struct fc_rport *fc_rport;
        wwn_t wwpn;
        u32 fcid;
        char wwpn_str[32], fcid_str[16];

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        im_port = itnim->im_port;
        bfa_trc(bfad, itnim->state);
        switch (itnim->state) {
        case ITNIM_STATE_ONLINE:
                if (!itnim->fc_rport) {
                        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                        bfad_im_fc_rport_add(im_port, itnim);
                        spin_lock_irqsave(&bfad->bfad_lock, flags);
                        wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
                        fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
                        wwn2str(wwpn_str, wwpn);
                        fcid2str(fcid_str, fcid);
                        list_add_tail(&itnim->list_entry,
                                &im_port->itnim_mapped_list);
                        bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_ONLINE,
                                im_port->shost->host_no,
                                itnim->scsi_tgt_id,
                                fcid_str, wwpn_str);
                } else {
                        printk(KERN_WARNING
                                "%s: itnim %llx is already in online state\n",
                                __func__,
                                bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
                }

                break;
        case ITNIM_STATE_OFFLINE_PENDING:
                itnim->state = ITNIM_STATE_OFFLINE;
                if (itnim->fc_rport) {
                        fc_rport = itnim->fc_rport;
                        ((struct bfad_itnim_data_s *)
                                fc_rport->dd_data)->itnim = NULL;
                        itnim->fc_rport = NULL;
                        if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
                                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                                fc_rport->dev_loss_tmo =
                                        bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
                                fc_remote_port_delete(fc_rport);
                                spin_lock_irqsave(&bfad->bfad_lock, flags);
                        }
                        wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
                        fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
                        wwn2str(wwpn_str, wwpn);
                        fcid2str(fcid_str, fcid);
                        list_del(&itnim->list_entry);
                        bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_OFFLINE,
                                im_port->shost->host_no,
                                itnim->scsi_tgt_id,
                                fcid_str, wwpn_str);
                }
                break;
        case ITNIM_STATE_FREE:
                if (itnim->fc_rport) {
                        fc_rport = itnim->fc_rport;
                        ((struct bfad_itnim_data_s *)
                                fc_rport->dd_data)->itnim = NULL;
                        itnim->fc_rport = NULL;
                        if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
                                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                                fc_rport->dev_loss_tmo =
                                        bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
                                fc_remote_port_delete(fc_rport);
                                spin_lock_irqsave(&bfad->bfad_lock, flags);
                        }
                        list_del(&itnim->list_entry);
                }

                kfree(itnim);
                break;
        default:
                bfa_assert(0);
                break;
        }

        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

/**
 * Scsi_Host template entry, queue a SCSI command to the BFAD.
 */
static int
bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
        struct bfad_im_port_s *im_port =
                (struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
        struct bfad_s *bfad = im_port->bfad;
        struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
        struct bfad_itnim_s *itnim;
        struct bfa_ioim_s *hal_io;
        unsigned long flags;
        int rc;
        s16 sg_cnt = 0;
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

        rc = fc_remote_port_chkready(rport);
        if (rc) {
                cmnd->result = rc;
                done(cmnd);
                return 0;
        }

        sg_cnt = scsi_dma_map(cmnd);

        if (sg_cnt < 0)
                return SCSI_MLQUEUE_HOST_BUSY;

        cmnd->scsi_done = done;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
                printk(KERN_WARNING
                        "bfad%d, queuecommand %p %x failed, BFA stopped\n",
                        bfad->inst_no, cmnd, cmnd->cmnd[0]);
                cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
                goto out_fail_cmd;
        }

        itnim = itnim_data->itnim;
        if (!itnim) {
                cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
                goto out_fail_cmd;
        }

        hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
                                itnim->bfa_itnim, sg_cnt);
        if (!hal_io) {
                printk(KERN_WARNING "hal_io failure\n");
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                scsi_dma_unmap(cmnd);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

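        /*
         * Remember the HAL IO handle in host_scribble so the abort handler
         * can match this command with its in-flight IO request.
         */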
        cmnd->host_scribble = (char *)hal_io;
        bfa_trc_fp(bfad, hal_io->iotag);
        bfa_ioim_start(hal_io);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        return 0;

out_fail_cmd:
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        scsi_dma_unmap(cmnd);
        if (done)
                done(cmnd);

        return 0;
}

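/**
 * Wait (up to bfa_linkup_delay seconds) for the local port to come online,
 * then give remote ports a similar window to be discovered before the
 * caller proceeds.
 */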
void
bfad_os_rport_online_wait(struct bfad_s *bfad)
{
        int i;
        int rport_delay = 10;

        for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
                && i < bfa_linkup_delay; i++)
                schedule_timeout_uninterruptible(HZ);

        if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
                rport_delay = rport_delay < bfa_linkup_delay ?
                        rport_delay : bfa_linkup_delay;
                for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
                        && i < rport_delay; i++)
                        schedule_timeout_uninterruptible(HZ);

                if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE))
                        schedule_timeout_uninterruptible(rport_delay * HZ);
        }
}

int
bfad_os_get_linkup_delay(struct bfad_s *bfad)
{
        u8 nwwns = 0;
        wwn_t *wwns;
        int ldelay;

        /*
         * Query for the boot target port wwns
         * -- read from boot information in flash.
         * If nwwns > 0 => boot over SAN, use a 30 second linkup delay
         * else => local boot machine, no extra linkup delay
         */
        bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, &wwns);

        if (nwwns > 0) {
                /* If boot over SAN; linkup_delay = 30sec */
                ldelay = 30;
        } else {
                /* If local boot; no linkup delay */
                ldelay = 0;
        }

        return ldelay;
}