blob: e15120d21aaaadf872a70f1d6735a31d6c8434ce [file] [log] [blame]
dea31012005-04-17 16:05:31 -05001/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -04003 * Fibre Channel Host Bus Adapters. *
Jamie Wellnitz41415862006-02-28 19:25:27 -05004 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -04005 * EMULEX and SLI are trademarks of Emulex. *
dea31012005-04-17 16:05:31 -05006 * www.emulex.com *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -04007 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea31012005-04-17 16:05:31 -05008 * *
9 * This program is free software; you can redistribute it and/or *
James.Smart@Emulex.Comc44ce172005-06-25 10:34:39 -040010 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
dea31012005-04-17 16:05:31 -050020 *******************************************************************/
21
dea31012005-04-17 16:05:31 -050022#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/kthread.h>
25#include <linux/interrupt.h>
26
James.Smart@Emulex.Com91886522005-08-10 15:03:09 -040027#include <scsi/scsi.h>
dea31012005-04-17 16:05:31 -050028#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h>
31
32#include "lpfc_hw.h"
33#include "lpfc_disc.h"
34#include "lpfc_sli.h"
35#include "lpfc_scsi.h"
36#include "lpfc.h"
37#include "lpfc_logmsg.h"
38#include "lpfc_crtn.h"
39
/*
 * AlpaArray: the 126 valid FC-AL arbitrated-loop physical addresses
 * (AL_PAs), listed from 0xEF down to 0x01.  Used to assign SCSI IDs
 * for scan-down and for the ALPA-based bind_method.
 * NOTE(review): ordering is assumed to match the scan-down assignment
 * policy — confirm against the bind_method consumers.
 */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
56
57static void lpfc_disc_timeout_handler(struct lpfc_hba *);
58
/*
 * Handle a node-device (nodev) timeout for @ndlp.  Called from the
 * worker thread (lpfc_work_list_done, LPFC_EVT_NODEV_TMO case).
 * Aborts any outstanding FCP I/O to the target (if the node ever had
 * a SCSI ID), logs the event, and feeds NLP_EVT_DEVICE_RM into the
 * discovery state machine to remove the node.
 */
static void
lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	uint8_t *name = (uint8_t *)&ndlp->nlp_portname;
	int warn_on = 0;

	spin_lock_irq(phba->host->host_lock);
	/* Timeout may have been cancelled after this event was queued;
	 * in that case there is nothing to do.
	 */
	if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
		spin_unlock_irq(phba->host->host_lock);
		return;
	}

	ndlp->nlp_flag &= ~NLP_NODEV_TMO;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		/* Node was mapped to a SCSI target, so I/O may be
		 * outstanding; escalate the log level below.
		 */
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	spin_unlock_irq(phba->host->host_lock);

	/* Same message either way; KERN_ERR (0203) when I/O had to be
	 * aborted, KERN_INFO (0204) otherwise.
	 */
	if (warn_on) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0203 Nodev timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
				"%d:0204 Nodev timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	}

	lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
	return;
}
106
/*
 * Drain phba->work_list, dispatching each queued worker event.
 * The host_lock is held only while manipulating the list; it is
 * dropped for the duration of each handler and re-taken before the
 * next list_remove_head.  Events whose handlers take ownership of
 * evt_arg1 (NODEV_TMO, ELS_RETRY) clear free_evt so the evt struct
 * is not freed here; all others are kfree'd after dispatch.
 */
static void
lpfc_work_list_done(struct lpfc_hba * phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;

	spin_lock_irq(phba->host->host_lock);
	while(!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		/* Drop the lock while the handler runs */
		spin_unlock_irq(phba->host->host_lock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_NODEV_TMO:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_process_nodev_timeout(phba, ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ONLINE:
			/* Bring the HBA online only if it is currently
			 * offline; result is returned through evt_arg1
			 * and the waiter released via evt_arg2.
			 */
			if (phba->hba_state < LPFC_LINK_DOWN)
				*(int *)(evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba,HS_FFRDY | HS_MBRDY);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline(phba);
			*(int *)(evtp->evt_arg1) = lpfc_sli_brdkill(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);

}
169
/*
 * Main body of one worker-thread iteration: snapshot and clear the
 * pending host-attention bits (work_ha) and worker event flags
 * (work_hba_events) under host_lock, then service each condition
 * with the lock dropped.  Finishes by draining the work_list.
 */
static void
lpfc_work_done(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	int i;
	uint32_t ha_copy;
	uint32_t control;
	uint32_t work_hba_events;

	spin_lock_irq(phba->host->host_lock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	work_hba_events=phba->work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	/* Hardware attention conditions */
	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Timer-driven worker events */
	if (work_hba_events & WORKER_DISC_TMO)
		lpfc_disc_timeout_handler(phba);

	if (work_hba_events & WORKER_ELS_TMO)
		lpfc_els_timeout_handler(phba);

	if (work_hba_events & WORKER_MBOX_TMO)
		lpfc_mbox_timeout_handler(phba);

	if (work_hba_events & WORKER_FDMI_TMO)
		lpfc_fdmi_tmo_handler(phba);

	/* Clear only the events we actually serviced; new ones may
	 * have been posted while the lock was dropped.
	 */
	spin_lock_irq(phba->host->host_lock);
	phba->work_hba_events &= ~work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	/* ha_copy holds 4 attention bits per ring; shift one nibble
	 * per iteration so the HA_RXATT test applies to ring i.
	 */
	for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
		pring = &phba->sli.ring[i];
		if ((ha_copy & HA_RXATT)
		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
			if (pring->flag & LPFC_STOP_IOCB_MASK) {
				/* Ring is stopped; defer the event to a
				 * later pass instead of handling it now.
				 */
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
			} else {
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(ha_copy &
								 HA_RXMASK));
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			}
			/*
			 * Turn on Ring interrupts
			 */
			spin_lock_irq(phba->host->host_lock);
			control = readl(phba->HCregaddr);
			control |= (HC_R0INT_ENA << i);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	lpfc_work_list_done (phba);

}
237
238static int
239check_work_wait_done(struct lpfc_hba *phba) {
240
241 spin_lock_irq(phba->host->host_lock);
242 if (phba->work_ha ||
243 phba->work_hba_events ||
244 (!list_empty(&phba->work_list)) ||
245 kthread_should_stop()) {
246 spin_unlock_irq(phba->host->host_lock);
247 return 1;
248 } else {
249 spin_unlock_irq(phba->host->host_lock);
250 return 0;
251 }
252}
253
/*
 * Entry point for the lpfc worker kthread.  Publishes an on-stack
 * wait queue via phba->work_wait (event posters wake it through that
 * pointer), then loops: sleep until check_work_wait_done() reports
 * pending work, exit if the kthread is being stopped, otherwise run
 * lpfc_work_done().  Clears phba->work_wait before returning so
 * posters stop waking a dead queue.
 */
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD(work_waitq);

	/* Highest non-realtime priority for the worker */
	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
						check_work_wait_done(phba));
		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

	}
	phba->work_wait = NULL;
	return 0;
}
279
280/*
281 * This is only called to handle FC worker events. Since this a rare
282 * occurance, we allocate a struct lpfc_work_evt structure here instead of
283 * embedding it in the IOCB.
284 */
285int
286lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
287 uint32_t evt)
288{
289 struct lpfc_work_evt *evtp;
290
291 /*
292 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
293 * be queued to worker thread for processing
294 */
295 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
296 if (!evtp)
297 return 0;
298
299 evtp->evt_arg1 = arg1;
300 evtp->evt_arg2 = arg2;
301 evtp->evt = evt;
302
303 list_add_tail(&evtp->evt_listp, &phba->work_list);
304 spin_lock_irq(phba->host->host_lock);
305 if (phba->work_wait)
306 wake_up(phba->work_wait);
307 spin_unlock_irq(phba->host->host_lock);
308
309 return 1;
310}
311
/*
 * Bring the port logically link-down: cancel pending discovery work,
 * clean up firmware default RPIs, flush RSCN/ELS activity, deliver a
 * DEVICE_RECOVERY event to every node on every state list, release
 * unused nodes, and reset pt2pt addressing state.  Also invoked by
 * sysfs / selective reset for cleanup.  Always returns 0.
 */
int
lpfc_linkdown(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct list_head *listp, *node_list[7];
	LPFC_MBOXQ_t *mb;
	int rc, i;

	psli = &phba->sli;
	/* sysfs or selective reset may call this routine to clean up */
	if (phba->hba_state >= LPFC_LINK_DOWN) {
		/* Already marked down; nothing more to do */
		if (phba->hba_state == LPFC_LINK_DOWN)
			return 0;

		spin_lock_irq(phba->host->host_lock);
		phba->hba_state = LPFC_LINK_DOWN;
		spin_unlock_irq(phba->host->host_lock);
	}

	/* Clean up any firmware default rpi's */
	if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
		lpfc_unreg_did(phba, 0xffffffff, mb);
		mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			/* Mailbox was not queued; free it ourselves */
			mempool_free( mb, phba->mbox_mem_pool);
		}
	}

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(phba);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	/* Issue a LINK DOWN event to all nodes */
	node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_nlpunmap_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_reglogin_list;
	node_list[5] = &phba->fc_adisc_list;
	node_list[6] = &phba->fc_plogi_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		/* _safe variant: the state machine may move or free
		 * the node while we iterate.
		 */
		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {

			rc = lpfc_disc_state_machine(phba, ndlp, NULL,
					     NLP_EVT_DEVICE_RECOVERY);

			/* Check config parameter use-adisc or FCP-2 */
			if ((rc != NLP_STE_FREED_NODE) &&
				(phba->cfg_use_adisc == 0) &&
				!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) {
				/* We know we will have to relogin, so
				 * unreglogin the rpi right now to fail
				 * any outstanding I/Os quickly.
				 */
				lpfc_unreg_rpi(phba, ndlp);
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->fc_flag & FC_PT2PT) {
		phba->fc_myDID = 0;
		if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox
			    (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free( mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(phba->host->host_lock);
	}
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LBIT;
	spin_unlock_irq(phba->host->host_lock);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(phba);

	/* Must process IOCBs on all rings to handle ABORTed I/Os */
	return 0;
}
411
/*
 * Bring the port logically link-up: set LPFC_LINK_UP, reset the
 * discovery-related fc_flag bits, and walk every node state list.
 * When the loop-init LBIT is set, fabric nodes are retired to the
 * unused list and non-ADISC nodes have their RPIs unregistered so
 * outstanding I/O fails fast before the new PLOGI.  Finishes by
 * freeing everything on the unused list.  Always returns 0.
 */
static int
lpfc_linkup(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct list_head *listp, *node_list[7];
	int i;

	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_UP;
	phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			   FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	phba->fc_flag |= FC_NDISC_ACTIVE;
	phba->fc_ns_retry = 0;
	spin_unlock_irq(phba->host->host_lock);


	/* NOTE: list order differs from lpfc_linkdown's teardown order */
	node_list[0] = &phba->fc_plogi_list;
	node_list[1] = &phba->fc_adisc_list;
	node_list[2] = &phba->fc_reglogin_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_nlpunmap_list;
	node_list[5] = &phba->fc_nlpmap_list;
	node_list[6] = &phba->fc_npr_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		/* _safe variant: lpfc_nlp_list() moves nodes off this
		 * list while we iterate.
		 */
		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
			if (phba->fc_flag & FC_LBIT) {
				if (ndlp->nlp_type & NLP_FABRIC) {
					/* On Linkup its safe to clean up the
					 * ndlp from Fabric connections.
					 */
					lpfc_nlp_list(phba, ndlp,
							NLP_UNUSED_LIST);
				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
					/* Fail outstanding IO now since device
					 * is marked for PLOGI.
					 */
					lpfc_unreg_rpi(phba, ndlp);
				}
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	return 0;
}
466
467/*
468 * This routine handles processing a CLEAR_LA mailbox
469 * command upon completion. It is setup in the LPFC_MBOXQ
470 * as the completion routine when the command is
471 * handed off to the SLI layer.
472 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	uint32_t control;

	psli = &phba->sli;
	mb = &pmb->mb;
	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	/* NOTE(review): mbxStatus 0x1601 is explicitly tolerated here —
	 * presumably a benign "already clear" status; confirm against
	 * the SLI mailbox status codes.
	 */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->fc_flag & FC_ABORT_DISCOVERY)
		goto out;

	phba->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (phba->fc_npr_cnt) {
		lpfc_els_disc_plogi(phba);
	}

	/* No nodes left to discover: discovery is no longer active */
	if (!phba->num_disc_nodes) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
	}

	phba->hba_state = LPFC_HBA_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0225 Device Discovery completes\n",
			phba->brd_no);

	mempool_free( pmb, phba->mbox_mem_pool);

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_ABORT_DISCOVERY;
	if (phba->fc_flag & FC_ESTABLISH_LINK) {
		phba->fc_flag &= ~FC_ESTABLISH_LINK;
	}
	spin_unlock_irq(phba->host->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(phba);

	/* turn on Link Attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;
}
548
/*
 * Completion handler for the CONFIG_LINK (local config link) mailbox
 * command.  On success: either arm the discovery timer to wait for a
 * FAN (public loop without LBIT) or start discovery with a FLOGI.
 * On error: take the link down, mark the HBA errored, and reuse @pmb
 * to issue CLEAR_LA.  Note the asymmetric ownership of @pmb — it is
 * freed here on the success path, but on the error path it is handed
 * to CLEAR_LA (and freed only if that issue fails).
 */
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	if (pmb->mb.mbxStatus)
		goto out;

	/* Success path: done with the mailbox */
	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    phba->fc_flag & FC_PUBLIC_LOOP &&
	     !(phba->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  hba_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(phba);
		return;
	}

	/* Start discovery by sending a FLOGI. hba_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	phba->hba_state = LPFC_FLOGI;
	lpfc_set_disctmo(phba);
	lpfc_initial_flogi(phba);
	return;

out:
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"%d:0306 CONFIG_LINK mbxStatus error x%x "
			"HBA state x%x\n",
			phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);

	lpfc_linkdown(phba);

	phba->hba_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"%d:0200 CONFIG_LINK bad hba state x%x\n",
			phba->brd_no, phba->hba_state);

	/* Reuse the mailbox to issue CLEAR_LA */
	lpfc_clear_la(phba, pmb);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
	rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED) {
		/* CLEAR_LA could not be issued: do the cleanup its
		 * completion handler would otherwise have done.
		 */
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}
	return;
}
606
/*
 * Completion handler for the READ_SPARAM mailbox command.  On success,
 * copies the service parameters from the mailbox DMA buffer into
 * phba->fc_sparam and caches the node/port WWNs.  On error, takes the
 * link down and reuses @pmb to issue CLEAR_LA (unless CLEAR_LA is
 * already in progress).
 */
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;


	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x>\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		lpfc_linkdown(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	/* Cache the service parameters and the WWNN/WWPN they contain */
	memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	memcpy((uint8_t *) & phba->fc_nodename,
	       (uint8_t *) & phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy((uint8_t *) & phba->fc_portname,
	       (uint8_t *) & phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free( pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	if (phba->hba_state != LPFC_CLEAR_LA) {
		/* Reuse the mailbox to issue CLEAR_LA */
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			/* Could not issue CLEAR_LA: perform its
			 * completion-side cleanup here instead.
			 */
			mempool_free( pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->ip_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free( pmb, phba->mbox_mem_pool);
	}
	return;
}
665
/*
 * Process a link-up attention from READ_LA: record link speed and
 * topology, set up myDID (from granted AL_PA on loop, or preferred
 * DID otherwise), optionally log the ALPA map, then run lpfc_linkup()
 * and kick off READ_SPARAM and CONFIG_LINK mailbox commands.
 * The two mailboxes are allocated up front (GFP_KERNEL, before the
 * lock); either may be NULL, in which case that step is skipped.
 */
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	int i;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(phba->host->host_lock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		/* Get Loop Map information */

		if (la->il)
			phba->fc_flag |= FC_LBIT;

		phba->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			/* No loop map delivered */
			phba->alpa_map[0] = 0;
		} else {
			/* Log the ALPA map, 16 bytes per message, only
			 * when link-event verbosity is enabled.
			 */
			if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"%d:1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							phba->brd_no,
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		/* Non-loop topology: use the preferred DID */
		phba->fc_myDID = phba->fc_pref_DID;
		phba->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		/* Read our service parameters back from the adapter */
		lpfc_read_sparam(phba, sparam_mbox);
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		lpfc_sli_issue_mbox(phba, sparam_mbox,
						(MBX_NOWAIT | MBX_STOP_IOCB));
	}

	if (cfglink_mbox) {
		/* Configure the link; its completion handler continues
		 * discovery (FAN wait or FLOGI).
		 */
		phba->hba_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		lpfc_sli_issue_mbox(phba, cfglink_mbox,
						(MBX_NOWAIT | MBX_STOP_IOCB));
	}
}
761
762static void
763lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
764 uint32_t control;
765 struct lpfc_sli *psli = &phba->sli;
766
767 lpfc_linkdown(phba);
768
769 /* turn on Link Attention interrupts - no CLEAR_LA needed */
770 spin_lock_irq(phba->host->host_lock);
771 psli->sli_flag |= LPFC_PROCESS_LA;
772 control = readl(phba->HCregaddr);
773 control |= HC_LAINT_ENA;
774 writel(control, phba->HCregaddr);
775 readl(phba->HCregaddr); /* flush */
776 spin_unlock_irq(phba->host->host_lock);
777}
778
779/*
780 * This routine handles processing a READ_LA mailbox
781 * command upon completion. It is setup in the LPFC_MBOXQ
782 * as the completion routine when the command is
783 * handed off to the SLI layer.
784 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_LINK_EVENT,
				"%d:1307 READ_LA mbox error x%x state x%x\n",
				phba->brd_no,
				mb->mbxStatus, phba->hba_state);
		lpfc_mbx_issue_link_down(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;

	/* Copy the loop map out of the mailbox DMA buffer */
	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(phba->host->host_lock);
	if (la->pb)
		phba->fc_flag |= FC_BYPASSED_MODE;
	else
		phba->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(phba->host->host_lock);

	/* An out-of-sequence or repeated event tag means we missed (or
	 * duplicated) a link attention; count it, and for a link-up
	 * force a linkdown first so state stays consistent.
	 */
	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	     (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP) {
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
		}
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1303 Link Up Event x%x received "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				phba->hba_state, phba->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	/* Common exit: release the DMA buffer and the mailbox */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
852
853/*
854 * This routine handles processing a REG_LOGIN mailbox
855 * command upon completion. It is setup in the LPFC_MBOXQ
856 * as the completion routine when the command is
857 * handed off to the SLI layer.
858 */
859void
860lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
861{
862 struct lpfc_sli *psli;
863 MAILBOX_t *mb;
864 struct lpfc_dmabuf *mp;
865 struct lpfc_nodelist *ndlp;
866
867 psli = &phba->sli;
868 mb = &pmb->mb;
869
870 ndlp = (struct lpfc_nodelist *) pmb->context2;
871 mp = (struct lpfc_dmabuf *) (pmb->context1);
872
873 pmb->context1 = NULL;
874
875 /* Good status, call state machine */
876 lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
877 lpfc_mbuf_free(phba, mp->virt, mp->phys);
878 kfree(mp);
879 mempool_free( pmb, phba->mbox_mem_pool);
880
881 return;
882}
883
884/*
885 * This routine handles processing a Fabric REG_LOGIN mailbox
886 * command upon completion. It is setup in the LPFC_MBOXQ
887 * as the completion routine when the command is
888 * handed off to the SLI layer.
889 */
890void
891lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
892{
893 struct lpfc_sli *psli;
894 MAILBOX_t *mb;
895 struct lpfc_dmabuf *mp;
896 struct lpfc_nodelist *ndlp;
897 struct lpfc_nodelist *ndlp_fdmi;
898
899
900 psli = &phba->sli;
901 mb = &pmb->mb;
902
903 ndlp = (struct lpfc_nodelist *) pmb->context2;
904 mp = (struct lpfc_dmabuf *) (pmb->context1);
905
906 if (mb->mbxStatus) {
907 lpfc_mbuf_free(phba, mp->virt, mp->phys);
908 kfree(mp);
909 mempool_free( pmb, phba->mbox_mem_pool);
910 mempool_free( ndlp, phba->nlp_mem_pool);
911
912 /* FLOGI failed, so just use loop map to make discovery list */
913 lpfc_disc_list_loopmap(phba);
914
915 /* Start discovery */
916 lpfc_disc_start(phba);
917 return;
918 }
919
920 pmb->context1 = NULL;
921
dea31012005-04-17 16:05:31 -0500922 ndlp->nlp_rpi = mb->un.varWords[0];
dea31012005-04-17 16:05:31 -0500923 ndlp->nlp_type |= NLP_FABRIC;
924 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
925 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
926
927 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
928 /* This NPort has been assigned an NPort_ID by the fabric as a
929 * result of the completed fabric login. Issue a State Change
930 * Registration (SCR) ELS request to the fabric controller
931 * (SCR_DID) so that this NPort gets RSCN events from the
932 * fabric.
933 */
934 lpfc_issue_els_scr(phba, SCR_DID, 0);
935
Jamie Wellnitzc9f87352006-02-28 19:25:23 -0500936 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
937 if (!ndlp) {
938 /* Allocate a new node instance. If the pool is empty,
939 * start the discovery process and skip the Nameserver
940 * login process. This is attempted again later on.
941 * Otherwise, issue a Port Login (PLOGI) to NameServer.
942 */
943 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
944 if (!ndlp) {
945 lpfc_disc_start(phba);
946 lpfc_mbuf_free(phba, mp->virt, mp->phys);
947 kfree(mp);
948 mempool_free( pmb, phba->mbox_mem_pool);
949 return;
950 } else {
951 lpfc_nlp_init(phba, ndlp, NameServer_DID);
952 ndlp->nlp_type |= NLP_FABRIC;
953 }
954 }
955 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
956 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
James Smart488d1462006-03-07 15:02:37 -0500957 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
Jamie Wellnitzc9f87352006-02-28 19:25:23 -0500958 if (phba->cfg_fdmi_on) {
959 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
960 GFP_KERNEL);
961 if (ndlp_fdmi) {
962 lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
963 ndlp_fdmi->nlp_type |= NLP_FABRIC;
964 ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
James Smart488d1462006-03-07 15:02:37 -0500965 lpfc_issue_els_plogi(phba, FDMI_DID, 0);
dea31012005-04-17 16:05:31 -0500966 }
967 }
968 }
969
970 lpfc_mbuf_free(phba, mp->virt, mp->phys);
971 kfree(mp);
972 mempool_free( pmb, phba->mbox_mem_pool);
dea31012005-04-17 16:05:31 -0500973 return;
974}
975
976/*
977 * This routine handles processing a NameServer REG_LOGIN mailbox
978 * command upon completion. It is setup in the LPFC_MBOXQ
979 * as the completion routine when the command is
980 * handed off to the SLI layer.
981 */
982void
983lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
984{
985 struct lpfc_sli *psli;
986 MAILBOX_t *mb;
987 struct lpfc_dmabuf *mp;
988 struct lpfc_nodelist *ndlp;
989
990 psli = &phba->sli;
991 mb = &pmb->mb;
992
993 ndlp = (struct lpfc_nodelist *) pmb->context2;
994 mp = (struct lpfc_dmabuf *) (pmb->context1);
995
996 if (mb->mbxStatus) {
997 lpfc_mbuf_free(phba, mp->virt, mp->phys);
998 kfree(mp);
999 mempool_free( pmb, phba->mbox_mem_pool);
1000 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1001
1002 /* RegLogin failed, so just use loop map to make discovery
1003 list */
1004 lpfc_disc_list_loopmap(phba);
1005
1006 /* Start discovery */
1007 lpfc_disc_start(phba);
1008 return;
1009 }
1010
1011 pmb->context1 = NULL;
1012
dea31012005-04-17 16:05:31 -05001013 ndlp->nlp_rpi = mb->un.varWords[0];
dea31012005-04-17 16:05:31 -05001014 ndlp->nlp_type |= NLP_FABRIC;
1015 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1016 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1017
1018 if (phba->hba_state < LPFC_HBA_READY) {
1019 /* Link up discovery requires Fabrib registration. */
1020 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
1021 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
1022 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
1023 }
1024
1025 phba->fc_ns_retry = 0;
1026 /* Good status, issue CT Request to NameServer */
1027 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
1028 /* Cannot issue NameServer Query, so finish up discovery */
1029 lpfc_disc_start(phba);
1030 }
1031
1032 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1033 kfree(mp);
1034 mempool_free( pmb, phba->mbox_mem_pool);
1035
1036 return;
1037}
1038
/*
 * Register ndlp with the FC transport as a remote port and cache the
 * resulting rport in ndlp->rport.  On failure a warning is logged and
 * ndlp->rport is left NULL.
 */
static void
lpfc_register_remote_port(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	/* Register with no roles first; the real roles are reported via
	 * fc_remote_port_rolechg() below once the rport exists.
	 */
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
	if (!rport) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	/* Cache the SCSI target id only if the transport assigned one in
	 * the driver's presentable range.
	 */
	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < MAX_FCP_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	/* Link the rport's private data back to this node */
	rdata = rport->dd_data;
	rdata->pnode = ndlp;

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;


	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);


	return;
}
1082
1083static void
1084lpfc_unregister_remote_port(struct lpfc_hba * phba,
1085 struct lpfc_nodelist * ndlp)
1086{
1087 struct fc_rport *rport = ndlp->rport;
1088 struct lpfc_rport_data *rdata = rport->dd_data;
1089
1090 ndlp->rport = NULL;
1091 rdata->pnode = NULL;
James.Smart@Emulex.Com19a7b4a2005-10-18 12:03:35 -04001092 fc_remote_port_delete(rport);
dea31012005-04-17 16:05:31 -05001093
1094 return;
1095}
1096
/*
 * Move node 'nlp' from whatever driver list it is currently on to 'list',
 * keeping the per-list counters and the NLP_LIST_MASK bits of nlp->nlp_flag
 * in sync, then make the matching FC transport rport add/delete upcalls.
 *
 * The list manipulation runs under phba->host->host_lock; the lock is
 * dropped temporarily around operations that may sleep or re-enter this
 * code (timer sync, node removal, retry-tmo cancel) and around all
 * transport upcalls.  Returns 0 in all cases.
 */
int
lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
{
	enum { none, unmapped, mapped } rport_add = none, rport_del = none;
	struct lpfc_sli *psli;

	/* NOTE(review): psli is assigned but never used in this function */
	psli = &phba->sli;
	/* Sanity check to ensure we are not moving to / from the same list */
	if ((nlp->nlp_flag & NLP_LIST_MASK) == list)
		if (list != NLP_NO_LIST)
			return 0;

	spin_lock_irq(phba->host->host_lock);
	/* First, take the node off the list it is on now, adjusting the
	 * corresponding counter.
	 */
	switch (nlp->nlp_flag & NLP_LIST_MASK) {
	case NLP_NO_LIST: /* Not on any list */
		break;
	case NLP_UNUSED_LIST:
		phba->fc_unused_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PLOGI_LIST:
		phba->fc_plogi_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_ADISC_LIST:
		phba->fc_adisc_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_REGLOGIN_LIST:
		phba->fc_reglogin_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PRLI_LIST:
		phba->fc_prli_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_UNMAPPED_LIST:
		phba->fc_unmap_cnt--;
		list_del(&nlp->nlp_listp);
		nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		nlp->nlp_type &= ~NLP_FC_NODE;
		phba->nport_event_cnt++;
		/* Remember to unregister the rport after the lists settle */
		if (nlp->rport)
			rport_del = unmapped;
		break;
	case NLP_MAPPED_LIST:
		phba->fc_map_cnt--;
		list_del(&nlp->nlp_listp);
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = mapped;
		break;
	case NLP_NPR_LIST:
		phba->fc_npr_cnt--;
		list_del(&nlp->nlp_listp);
		/* Stop delay tmo if taking node off NPR list */
		if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
		   (list != NLP_NPR_LIST)) {
			/* drop the lock: the cancel path may sleep */
			spin_unlock_irq(phba->host->host_lock);
			lpfc_cancel_retry_delay_tmo(phba, nlp);
			spin_lock_irq(phba->host->host_lock);
		}
		break;
	}

	nlp->nlp_flag &= ~NLP_LIST_MASK;

	/* Add NPort <did> to <num> list */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_NODE,
			"%d:0904 Add NPort x%x to %d list Data: x%x\n",
			phba->brd_no,
			nlp->nlp_DID, list, nlp->nlp_flag);

	/* Now put it on the destination list and bump that list's counter */
	switch (list) {
	case NLP_NO_LIST: /* No list, just remove it */
		spin_unlock_irq(phba->host->host_lock);
		lpfc_nlp_remove(phba, nlp);
		spin_lock_irq(phba->host->host_lock);
		/* as node removed - stop further transport calls */
		rport_del = none;
		break;
	case NLP_UNUSED_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the unused list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
		phba->fc_unused_cnt++;
		break;
	case NLP_PLOGI_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the plogi list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
		phba->fc_plogi_cnt++;
		break;
	case NLP_ADISC_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the adisc list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
		phba->fc_adisc_cnt++;
		break;
	case NLP_REGLOGIN_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the reglogin list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
		phba->fc_reglogin_cnt++;
		break;
	case NLP_PRLI_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the prli list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
		phba->fc_prli_cnt++;
		break;
	case NLP_UNMAPPED_LIST:
		rport_add = unmapped;
		/* ensure all vestiges of "mapped" significance are gone */
		nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		nlp->nlp_flag |= list;
		/* Put it at the end of the unmap list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
		phba->fc_unmap_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			/* del_timer_sync() may wait for the handler and
			 * must not run under host_lock
			 */
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_tmofunc);
			spin_lock_irq(phba->host->host_lock);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.
					      evt_listp);

		}
		nlp->nlp_type |= NLP_FC_NODE;
		break;
	case NLP_MAPPED_LIST:
		rport_add = mapped;
		nlp->nlp_flag |= list;
		/* Put it at the end of the map list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
		phba->fc_map_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_tmofunc);
			spin_lock_irq(phba->host->host_lock);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.
					      evt_listp);

		}
		break;
	case NLP_NPR_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the npr list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
		phba->fc_npr_cnt++;

		/* (Re)arm the nodev timeout unless it is already pending */
		if (!(nlp->nlp_flag & NLP_NODEV_TMO))
			mod_timer(&nlp->nlp_tmofunc,
				  jiffies + HZ * phba->cfg_nodev_tmo);

		nlp->nlp_flag |= NLP_NODEV_TMO;
		nlp->nlp_flag &= ~NLP_RCV_PLOGI;
		break;
	case NLP_JUST_DQ:
		break;
	}

	spin_unlock_irq(phba->host->host_lock);

	/*
	 * We make all the calls into the transport after we have
	 * moved the node between lists. This so that we don't
	 * release the lock while in-between lists.
	 */

	/* Don't upcall midlayer if we're unloading */
	if (!(phba->fc_flag & FC_UNLOADING)) {
		/*
		 * We revalidate the rport pointer as the "add" function
		 * may have removed the remote port.
		 */
		if ((rport_del != none) && nlp->rport)
			lpfc_unregister_remote_port(phba, nlp);

		if (rport_add != none) {
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices
			 */
			if (!nlp->rport)
				lpfc_register_remote_port(phba, nlp);

			/*
			 * if we added to Mapped list, but the remote port
			 * registration failed or assigned a target id outside
			 * our presentable range - move the node to the
			 * Unmapped List
			 */
			if ((rport_add == mapped) &&
			    ((!nlp->rport) ||
			     (nlp->rport->scsi_target_id == -1) ||
			     (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
				nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
				spin_lock_irq(phba->host->host_lock);
				nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
				spin_unlock_irq(phba->host->host_lock);
				lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
			}
		}
	}
	return 0;
}
1314
1315/*
1316 * Start / ReStart rescue timer for Discovery / RSCN handling
1317 */
1318void
1319lpfc_set_disctmo(struct lpfc_hba * phba)
1320{
1321 uint32_t tmo;
1322
Jamie Wellnitzc9f87352006-02-28 19:25:23 -05001323 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
1324 /* For FAN, timeout should be greater then edtov */
1325 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1326 } else {
1327 /* Normal discovery timeout should be > then ELS/CT timeout
1328 * FC spec states we need 3 * ratov for CT requests
1329 */
1330 tmo = ((phba->fc_ratov * 3) + 3);
1331 }
dea31012005-04-17 16:05:31 -05001332
1333 mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
1334 spin_lock_irq(phba->host->host_lock);
1335 phba->fc_flag |= FC_DISC_TMO;
1336 spin_unlock_irq(phba->host->host_lock);
1337
1338 /* Start Discovery Timer state <hba_state> */
1339 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1340 "%d:0247 Start Discovery Timer state x%x "
1341 "Data: x%x x%lx x%x x%x\n",
1342 phba->brd_no,
1343 phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
1344 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1345
1346 return;
1347}
1348
1349/*
1350 * Cancel rescue timer for Discovery / RSCN handling
1351 */
1352int
1353lpfc_can_disctmo(struct lpfc_hba * phba)
1354{
1355 /* Turn off discovery timer if its running */
1356 if (phba->fc_flag & FC_DISC_TMO) {
1357 spin_lock_irq(phba->host->host_lock);
1358 phba->fc_flag &= ~FC_DISC_TMO;
1359 spin_unlock_irq(phba->host->host_lock);
1360 del_timer_sync(&phba->fc_disctmo);
1361 phba->work_hba_events &= ~WORKER_DISC_TMO;
1362 }
1363
1364 /* Cancel Discovery Timer state <hba_state> */
1365 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1366 "%d:0248 Cancel Discovery Timer state x%x "
1367 "Data: x%x x%x x%x\n",
1368 phba->brd_no, phba->hba_state, phba->fc_flag,
1369 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1370
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001371 return 0;
dea31012005-04-17 16:05:31 -05001372}
1373
1374/*
1375 * Check specified ring for outstanding IOCB on the SLI queue
1376 * Return true if iocb matches the specified nport
1377 */
1378int
1379lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1380 struct lpfc_sli_ring * pring,
1381 struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
1382{
1383 struct lpfc_sli *psli;
1384 IOCB_t *icmd;
1385
1386 psli = &phba->sli;
1387 icmd = &iocb->iocb;
1388 if (pring->ringno == LPFC_ELS_RING) {
1389 switch (icmd->ulpCommand) {
1390 case CMD_GEN_REQUEST64_CR:
1391 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001392 return 1;
dea31012005-04-17 16:05:31 -05001393 case CMD_ELS_REQUEST64_CR:
1394 case CMD_XMIT_ELS_RSP64_CX:
1395 if (iocb->context1 == (uint8_t *) ndlp)
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001396 return 1;
dea31012005-04-17 16:05:31 -05001397 }
1398 } else if (pring->ringno == psli->ip_ring) {
1399
1400 } else if (pring->ringno == psli->fcp_ring) {
1401 /* Skip match check if waiting to relogin to FCP target */
1402 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1403 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001404 return 0;
dea31012005-04-17 16:05:31 -05001405 }
1406 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001407 return 1;
dea31012005-04-17 16:05:31 -05001408 }
1409 } else if (pring->ringno == psli->next_ring) {
1410
1411 }
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001412 return 0;
dea31012005-04-17 16:05:31 -05001413}
1414
1415/*
1416 * Free resources / clean up outstanding I/Os
1417 * associated with nlp_rpi in the LPFC_NODELIST entry.
1418 */
1419static int
1420lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1421{
1422 struct lpfc_sli *psli;
1423 struct lpfc_sli_ring *pring;
1424 struct lpfc_iocbq *iocb, *next_iocb;
1425 IOCB_t *icmd;
1426 uint32_t rpi, i;
1427
1428 /*
1429 * Everything that matches on txcmplq will be returned
1430 * by firmware with a no rpi error.
1431 */
1432 psli = &phba->sli;
1433 rpi = ndlp->nlp_rpi;
1434 if (rpi) {
1435 /* Now process each ring */
1436 for (i = 0; i < psli->num_rings; i++) {
1437 pring = &psli->ring[i];
1438
1439 spin_lock_irq(phba->host->host_lock);
1440 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1441 list) {
1442 /*
1443 * Check to see if iocb matches the nport we are
1444 * looking for
1445 */
1446 if ((lpfc_check_sli_ndlp
1447 (phba, pring, iocb, ndlp))) {
1448 /* It matches, so deque and call compl
1449 with an error */
1450 list_del(&iocb->list);
1451 pring->txq_cnt--;
1452 if (iocb->iocb_cmpl) {
1453 icmd = &iocb->iocb;
1454 icmd->ulpStatus =
1455 IOSTAT_LOCAL_REJECT;
1456 icmd->un.ulpWord[4] =
1457 IOERR_SLI_ABORTED;
1458 spin_unlock_irq(phba->host->
1459 host_lock);
1460 (iocb->iocb_cmpl) (phba,
1461 iocb, iocb);
1462 spin_lock_irq(phba->host->
1463 host_lock);
James Bottomley604a3e32005-10-29 10:28:33 -05001464 } else
1465 lpfc_sli_release_iocbq(phba,
1466 iocb);
dea31012005-04-17 16:05:31 -05001467 }
1468 }
1469 spin_unlock_irq(phba->host->host_lock);
1470
1471 }
1472 }
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001473 return 0;
dea31012005-04-17 16:05:31 -05001474}
1475
1476/*
1477 * Free rpi associated with LPFC_NODELIST entry.
1478 * This routine is called from lpfc_freenode(), when we are removing
1479 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1480 * LOGO that completes successfully, and we are waiting to PLOGI back
1481 * to the remote NPort. In addition, it is called after we receive
1482 * and unsolicated ELS cmd, send back a rsp, the rsp completes and
1483 * we are waiting to PLOGI back to the remote NPort.
1484 */
1485int
1486lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1487{
1488 LPFC_MBOXQ_t *mbox;
1489 int rc;
1490
1491 if (ndlp->nlp_rpi) {
1492 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
1493 lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
1494 mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
1495 rc = lpfc_sli_issue_mbox
1496 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1497 if (rc == MBX_NOT_FINISHED)
1498 mempool_free( mbox, phba->mbox_mem_pool);
1499 }
dea31012005-04-17 16:05:31 -05001500 lpfc_no_rpi(phba, ndlp);
1501 ndlp->nlp_rpi = 0;
1502 return 1;
1503 }
1504 return 0;
1505}
1506
1507/*
1508 * Free resources associated with LPFC_NODELIST entry
1509 * so it can be freed.
1510 */
1511static int
1512lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1513{
1514 LPFC_MBOXQ_t *mb;
1515 LPFC_MBOXQ_t *nextmb;
1516 struct lpfc_dmabuf *mp;
dea31012005-04-17 16:05:31 -05001517
1518 /* Cleanup node for NPort <nlp_DID> */
1519 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1520 "%d:0900 Cleanup node for NPort x%x "
1521 "Data: x%x x%x x%x\n",
1522 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1523 ndlp->nlp_state, ndlp->nlp_rpi);
1524
1525 lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
1526
1527 /*
1528 * if unloading the driver - just leave the remote port in place.
1529 * The driver unload will force the attached devices to detach
1530 * and flush cache's w/o generating flush errors.
1531 */
1532 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
James.Smart@Emulex.Com19a7b4a2005-10-18 12:03:35 -04001533 lpfc_unregister_remote_port(phba, ndlp);
dea31012005-04-17 16:05:31 -05001534 ndlp->nlp_sid = NLP_NO_SID;
1535 }
1536
1537 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1538 if ((mb = phba->sli.mbox_active)) {
1539 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1540 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1541 mb->context2 = NULL;
1542 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1543 }
1544 }
1545 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1546 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1547 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1548 mp = (struct lpfc_dmabuf *) (mb->context1);
1549 if (mp) {
1550 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1551 kfree(mp);
1552 }
1553 list_del(&mb->list);
1554 mempool_free(mb, phba->mbox_mem_pool);
1555 }
1556 }
1557
1558 lpfc_els_abort(phba,ndlp,0);
1559 spin_lock_irq(phba->host->host_lock);
1560 ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
1561 spin_unlock_irq(phba->host->host_lock);
1562 del_timer_sync(&ndlp->nlp_tmofunc);
1563
Jamie Wellnitz5024ab12006-02-28 19:25:28 -05001564 ndlp->nlp_last_elscmd = 0;
dea31012005-04-17 16:05:31 -05001565 del_timer_sync(&ndlp->nlp_delayfunc);
1566
1567 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1568 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1569 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1570 list_del_init(&ndlp->els_retry_evt.evt_listp);
1571
1572 lpfc_unreg_rpi(phba, ndlp);
1573
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001574 return 0;
dea31012005-04-17 16:05:31 -05001575}
1576
1577/*
1578 * Check to see if we can free the nlp back to the freelist.
1579 * If we are in the middle of using the nlp in the discovery state
1580 * machine, defer the free till we reach the end of the state machine.
1581 */
1582int
1583lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1584{
1585 if (ndlp->nlp_flag & NLP_NODEV_TMO) {
1586 spin_lock_irq(phba->host->host_lock);
1587 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1588 spin_unlock_irq(phba->host->host_lock);
1589 del_timer_sync(&ndlp->nlp_tmofunc);
1590 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1591 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1592
1593 }
1594
1595
1596 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
James Smartfdcebe22006-03-07 15:04:01 -05001597 lpfc_cancel_retry_delay_tmo(phba, ndlp);
dea31012005-04-17 16:05:31 -05001598 }
1599
1600 if (ndlp->nlp_disc_refcnt) {
1601 spin_lock_irq(phba->host->host_lock);
1602 ndlp->nlp_flag |= NLP_DELAY_REMOVE;
1603 spin_unlock_irq(phba->host->host_lock);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001604 } else {
dea31012005-04-17 16:05:31 -05001605 lpfc_freenode(phba, ndlp);
1606 mempool_free( ndlp, phba->nlp_mem_pool);
1607 }
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001608 return 0;
dea31012005-04-17 16:05:31 -05001609}
1610
/*
 * Test whether 'did' addresses the node 'ndlp'.  Besides an exact D_ID
 * match, this also tries the private-loop style match where one side's
 * domain/area bytes are zero and the other side's match our own
 * fc_myDID.  Returns 1 on a match, 0 otherwise.
 */
static int
lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
{
	D_ID mydid;
	D_ID ndlpdid;
	D_ID matchdid;

	/* the broadcast DID never identifies a specific node */
	if (did == Bcast_DID)
		return 0;

	/* a node with no DID assigned cannot match */
	if (ndlp->nlp_DID == 0) {
		return 0;
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = phba->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	/* only DIDs sharing the same low (id) byte can fuzzy-match */
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			/* sought DID carries our domain/area: match a node
			 * whose stored DID has zero domain/area but a
			 * non-zero id byte
			 */
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		/* NOTE(review): after this reassignment matchdid equals
		 * ndlpdid, which seems to make the zero-domain/area test
		 * below unsatisfiable given the mydid zero-check above —
		 * confirm the intended operand (possibly the original
		 * 'did') before changing anything.
		 */
		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
1660
1661/* Search for a nodelist entry on a specific list */
1662struct lpfc_nodelist *
1663lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1664{
1665 struct lpfc_nodelist *ndlp, *next_ndlp;
1666 uint32_t data1;
1667
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05001668 spin_lock_irq(phba->host->host_lock);
dea31012005-04-17 16:05:31 -05001669 if (order & NLP_SEARCH_UNMAPPED) {
1670 list_for_each_entry_safe(ndlp, next_ndlp,
1671 &phba->fc_nlpunmap_list, nlp_listp) {
1672 if (lpfc_matchdid(phba, ndlp, did)) {
1673 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1674 ((uint32_t) ndlp->nlp_xri << 16) |
1675 ((uint32_t) ndlp->nlp_type << 8) |
1676 ((uint32_t) ndlp->nlp_rpi & 0xff));
1677 /* FIND node DID unmapped */
1678 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1679 "%d:0929 FIND node DID unmapped"
1680 " Data: x%p x%x x%x x%x\n",
1681 phba->brd_no,
1682 ndlp, ndlp->nlp_DID,
1683 ndlp->nlp_flag, data1);
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05001684 spin_unlock_irq(phba->host->host_lock);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001685 return ndlp;
dea31012005-04-17 16:05:31 -05001686 }
1687 }
1688 }
1689
1690 if (order & NLP_SEARCH_MAPPED) {
1691 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1692 nlp_listp) {
1693 if (lpfc_matchdid(phba, ndlp, did)) {
1694
1695 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1696 ((uint32_t) ndlp->nlp_xri << 16) |
1697 ((uint32_t) ndlp->nlp_type << 8) |
1698 ((uint32_t) ndlp->nlp_rpi & 0xff));
1699 /* FIND node DID mapped */
1700 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1701 "%d:0930 FIND node DID mapped "
1702 "Data: x%p x%x x%x x%x\n",
1703 phba->brd_no,
1704 ndlp, ndlp->nlp_DID,
1705 ndlp->nlp_flag, data1);
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05001706 spin_unlock_irq(phba->host->host_lock);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001707 return ndlp;
dea31012005-04-17 16:05:31 -05001708 }
1709 }
1710 }
1711
1712 if (order & NLP_SEARCH_PLOGI) {
1713 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1714 nlp_listp) {
1715 if (lpfc_matchdid(phba, ndlp, did)) {
1716
1717 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1718 ((uint32_t) ndlp->nlp_xri << 16) |
1719 ((uint32_t) ndlp->nlp_type << 8) |
1720 ((uint32_t) ndlp->nlp_rpi & 0xff));
1721 /* LOG change to PLOGI */
1722 /* FIND node DID plogi */
1723 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1724 "%d:0908 FIND node DID plogi "
1725 "Data: x%p x%x x%x x%x\n",
1726 phba->brd_no,
1727 ndlp, ndlp->nlp_DID,
1728 ndlp->nlp_flag, data1);
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05001729 spin_unlock_irq(phba->host->host_lock);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001730 return ndlp;
dea31012005-04-17 16:05:31 -05001731 }
1732 }
1733 }
1734
1735 if (order & NLP_SEARCH_ADISC) {
1736 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1737 nlp_listp) {
1738 if (lpfc_matchdid(phba, ndlp, did)) {
1739
1740 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1741 ((uint32_t) ndlp->nlp_xri << 16) |
1742 ((uint32_t) ndlp->nlp_type << 8) |
1743 ((uint32_t) ndlp->nlp_rpi & 0xff));
1744 /* LOG change to ADISC */
1745 /* FIND node DID adisc */
1746 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1747 "%d:0931 FIND node DID adisc "
1748 "Data: x%p x%x x%x x%x\n",
1749 phba->brd_no,
1750 ndlp, ndlp->nlp_DID,
1751 ndlp->nlp_flag, data1);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001752 return ndlp;
dea31012005-04-17 16:05:31 -05001753 }
1754 }
1755 }
1756
1757 if (order & NLP_SEARCH_REGLOGIN) {
1758 list_for_each_entry_safe(ndlp, next_ndlp,
1759 &phba->fc_reglogin_list, nlp_listp) {
1760 if (lpfc_matchdid(phba, ndlp, did)) {
1761
1762 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1763 ((uint32_t) ndlp->nlp_xri << 16) |
1764 ((uint32_t) ndlp->nlp_type << 8) |
1765 ((uint32_t) ndlp->nlp_rpi & 0xff));
1766 /* LOG change to REGLOGIN */
1767 /* FIND node DID reglogin */
1768 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1769 "%d:0931 FIND node DID reglogin"
1770 " Data: x%p x%x x%x x%x\n",
1771 phba->brd_no,
1772 ndlp, ndlp->nlp_DID,
1773 ndlp->nlp_flag, data1);
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05001774 spin_unlock_irq(phba->host->host_lock);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001775 return ndlp;
dea31012005-04-17 16:05:31 -05001776 }
1777 }
1778 }
1779
1780 if (order & NLP_SEARCH_PRLI) {
1781 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1782 nlp_listp) {
1783 if (lpfc_matchdid(phba, ndlp, did)) {
1784
1785 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1786 ((uint32_t) ndlp->nlp_xri << 16) |
1787 ((uint32_t) ndlp->nlp_type << 8) |
1788 ((uint32_t) ndlp->nlp_rpi & 0xff));
1789 /* LOG change to PRLI */
1790 /* FIND node DID prli */
1791 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1792 "%d:0931 FIND node DID prli "
1793 "Data: x%p x%x x%x x%x\n",
1794 phba->brd_no,
1795 ndlp, ndlp->nlp_DID,
1796 ndlp->nlp_flag, data1);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001797 return ndlp;
dea31012005-04-17 16:05:31 -05001798 }
1799 }
1800 }
1801
1802 if (order & NLP_SEARCH_NPR) {
1803 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1804 nlp_listp) {
1805 if (lpfc_matchdid(phba, ndlp, did)) {
1806
1807 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1808 ((uint32_t) ndlp->nlp_xri << 16) |
1809 ((uint32_t) ndlp->nlp_type << 8) |
1810 ((uint32_t) ndlp->nlp_rpi & 0xff));
1811 /* LOG change to NPR */
1812 /* FIND node DID npr */
1813 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1814 "%d:0931 FIND node DID npr "
1815 "Data: x%p x%x x%x x%x\n",
1816 phba->brd_no,
1817 ndlp, ndlp->nlp_DID,
1818 ndlp->nlp_flag, data1);
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05001819 spin_unlock_irq(phba->host->host_lock);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001820 return ndlp;
dea31012005-04-17 16:05:31 -05001821 }
1822 }
1823 }
1824
1825 if (order & NLP_SEARCH_UNUSED) {
1826 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1827 nlp_listp) {
1828 if (lpfc_matchdid(phba, ndlp, did)) {
1829
1830 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1831 ((uint32_t) ndlp->nlp_xri << 16) |
1832 ((uint32_t) ndlp->nlp_type << 8) |
1833 ((uint32_t) ndlp->nlp_rpi & 0xff));
1834 /* LOG change to UNUSED */
1835 /* FIND node DID unused */
1836 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1837 "%d:0931 FIND node DID unused "
1838 "Data: x%p x%x x%x x%x\n",
1839 phba->brd_no,
1840 ndlp, ndlp->nlp_DID,
1841 ndlp->nlp_flag, data1);
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05001842 spin_unlock_irq(phba->host->host_lock);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05001843 return ndlp;
dea31012005-04-17 16:05:31 -05001844 }
1845 }
1846 }
1847
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05001848 spin_unlock_irq(phba->host->host_lock);
1849
dea31012005-04-17 16:05:31 -05001850 /* FIND node did <did> NOT FOUND */
1851 lpfc_printf_log(phba,
1852 KERN_INFO,
1853 LOG_NODE,
1854 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1855 phba->brd_no, did, order);
1856
1857 /* no match found */
1858 return NULL;
1859}
1860
/*
 * Look up (or create) the node for <did> and mark it for discovery.
 *
 * Returns the ndlp to discover, or NULL when the DID should be skipped
 * (not part of the current RSCN payload, allocation failure, or the node
 * is already in the middle of ADISC/PLOGI processing).
 */
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t flg;

	ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
	if (!ndlp) {
		/* Unknown DID: during RSCN processing only DIDs named in
		 * the RSCN payload are worth discovering.
		 */
		if ((phba->fc_flag & FC_RSCN_MODE) &&
		   ((lpfc_rscn_payload_check(phba, did) == 0)))
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		/* Fresh node: park it on the NPR list flagged for discovery */
		lpfc_nlp_init(phba, ndlp, did);
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		return ndlp;
	}
	if (phba->fc_flag & FC_RSCN_MODE) {
		if (lpfc_rscn_payload_check(phba, did)) {
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			if (ndlp->nlp_flag & NLP_DELAY_TMO)
				lpfc_cancel_retry_delay_tmo(phba, ndlp);
		} else {
			/* Existing node not named by the RSCN: drop it from
			 * this discovery round.
			 */
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			ndlp = NULL;
		}
	} else {
		/* Non-RSCN discovery: a node already being ADISCed or
		 * PLOGIed must not be re-queued.
		 */
		flg = ndlp->nlp_flag & NLP_LIST_MASK;
		if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
			return NULL;
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	}
	return ndlp;
}
1905
1906/* Build a list of nodes to discover based on the loopmap */
1907void
1908lpfc_disc_list_loopmap(struct lpfc_hba * phba)
1909{
1910 int j;
1911 uint32_t alpa, index;
1912
1913 if (phba->hba_state <= LPFC_LINK_DOWN) {
1914 return;
1915 }
1916 if (phba->fc_topology != TOPOLOGY_LOOP) {
1917 return;
1918 }
1919
1920 /* Check for loop map present or not */
1921 if (phba->alpa_map[0]) {
1922 for (j = 1; j <= phba->alpa_map[0]; j++) {
1923 alpa = phba->alpa_map[j];
1924
1925 if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1926 continue;
1927 }
1928 lpfc_setup_disc_node(phba, alpa);
1929 }
1930 } else {
1931 /* No alpamap, so try all alpa's */
1932 for (j = 0; j < FC_MAXLOOP; j++) {
1933 /* If cfg_scan_down is set, start from highest
1934 * ALPA (0xef) to lowest (0x1).
1935 */
1936 if (phba->cfg_scan_down)
1937 index = j;
1938 else
1939 index = FC_MAXLOOP - j - 1;
1940 alpa = lpfcAlpaArray[index];
1941 if ((phba->fc_myDID & 0xff) == alpa) {
1942 continue;
1943 }
1944
1945 lpfc_setup_disc_node(phba, alpa);
1946 }
1947 }
1948 return;
1949}
1950
/* Start Link up / RSCN discovery on NPR list */
/*
 * Kick off discovery of all NPR nodes flagged NLP_NPR_2B_DISC.
 * ADISC is attempted first; when nothing is left to ADISC and the HBA
 * is not yet READY, a CLEAR_LA mailbox is issued.  Otherwise PLOGIs are
 * sent and, in RSCN mode, any RSCNs that arrived meanwhile are handled.
 */
void
lpfc_disc_start(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t did_changed, num_sent;
	uint32_t clear_la_pending;
	int rc;

	psli = &phba->sli;

	/* Discovery requires the link to be up */
	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	/* Remember whether a CLEAR_LA is already outstanding so we do not
	 * issue a second one below.
	 */
	if (phba->hba_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (phba->hba_state < LPFC_HBA_READY) {
		phba->hba_state = LPFC_DISC_AUTH;
	}
	lpfc_set_disctmo(phba);

	/* A changed local DID invalidates old logins, forcing PLOGI below */
	if (phba->fc_prevDID == phba->fc_myDID) {
		did_changed = 0;
	} else {
		did_changed = 1;
	}
	phba->fc_prevDID = phba->fc_myDID;
	phba->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0202 Start Discovery hba state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	/* If our did changed, we MUST do PLOGI */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				nlp_listp) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			if (did_changed) {
				/* Clearing NLP_NPR_ADISC downgrades the node
				 * from ADISC to PLOGI discovery.
				 */
				spin_lock_irq(phba->host->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(phba->host->host_lock);
			}
		}
	}

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(phba);

	/* ADISC completions will re-enter discovery; wait for them */
	if (num_sent)
		return;

	if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
		/* If we get here, there is nothing to ADISC */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			phba->hba_state = LPFC_CLEAR_LA;
			lpfc_clear_la(phba, mbox);
			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
			rc = lpfc_sli_issue_mbox(phba, mbox,
						 (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED) {
				/* CLEAR_LA could not be issued: abandon
				 * discovery, release stopped rings and
				 * declare the HBA ready.
				 */
				mempool_free( mbox, phba->mbox_mem_pool);
				lpfc_disc_flush_list(phba);
				psli->ring[(psli->ip_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->fcp_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->next_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				phba->hba_state = LPFC_HBA_READY;
			}
		}
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(phba);

		if (num_sent)
			return;

		if (phba->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((phba->fc_rscn_id_cnt == 0) &&
			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(phba->host->host_lock);
				phba->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(phba->host->host_lock);
			} else
				lpfc_els_handle_rscn(phba);
		}
	}
	return;
}
2052
/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		/* Only ELS requests/responses are torn down here */
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			/* Not yet issued to the HW: just unlink and free */
			list_del(&iocb->list);
			pring->txq_cnt--;
			lpfc_els_free_iocb(phba, iocb);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			/* Already issued: suppress the completion handler
			 * and release its buffers directly.
			 */
			iocb->iocb_cmpl = NULL;
			/* context2 = cmd, context2->next = rsp, context3 =
			   bpl */
			if (iocb->context2) {
				/* Free the response IOCB before handling the
				   command. */

				mp = (struct lpfc_dmabuf *) (iocb->context2);
				mp = list_get_first(&mp->list,
						    struct lpfc_dmabuf,
						    list);
				if (mp) {
					/* Delay before releasing rsp buffer to
					 * give UNREG mbox a chance to take
					 * effect.
					 */
					list_add(&mp->list,
						 &phba->freebufList);
				}
				lpfc_mbuf_free(phba,
					       ((struct lpfc_dmabuf *)
						iocb->context2)->virt,
					       ((struct lpfc_dmabuf *)
						iocb->context2)->phys);
				kfree(iocb->context2);
			}

			if (iocb->context3) {
				lpfc_mbuf_free(phba,
					       ((struct lpfc_dmabuf *)
						iocb->context3)->virt,
					       ((struct lpfc_dmabuf *)
						iocb->context3)->phys);
				kfree(iocb->context3);
			}
		}
	}

	return;
}
2135
2136void
2137lpfc_disc_flush_list(struct lpfc_hba * phba)
2138{
2139 struct lpfc_nodelist *ndlp, *next_ndlp;
2140
2141 if (phba->fc_plogi_cnt) {
2142 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
2143 nlp_listp) {
2144 lpfc_free_tx(phba, ndlp);
2145 lpfc_nlp_remove(phba, ndlp);
2146 }
2147 }
2148 if (phba->fc_adisc_cnt) {
2149 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
2150 nlp_listp) {
2151 lpfc_free_tx(phba, ndlp);
2152 lpfc_nlp_remove(phba, ndlp);
2153 }
2154 }
2155 return;
2156}
2157
2158/*****************************************************************************/
2159/*
2160 * NAME: lpfc_disc_timeout
2161 *
2162 * FUNCTION: Fibre Channel driver discovery timeout routine.
2163 *
2164 * EXECUTION ENVIRONMENT: interrupt only
2165 *
2166 * CALLED FROM:
2167 * Timer function
2168 *
2169 * RETURNS:
2170 * none
2171 */
2172/*****************************************************************************/
2173void
2174lpfc_disc_timeout(unsigned long ptr)
2175{
2176 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2177 unsigned long flags = 0;
2178
2179 if (unlikely(!phba))
2180 return;
2181
2182 spin_lock_irqsave(phba->host->host_lock, flags);
2183 if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
2184 phba->work_hba_events |= WORKER_DISC_TMO;
2185 if (phba->work_wait)
2186 wake_up(phba->work_wait);
2187 }
2188 spin_unlock_irqrestore(phba->host->host_lock, flags);
2189 return;
2190}
2191
/*
 * Worker-thread side of the discovery timer.  Dispatches on the hba_state
 * that timed out: FAN wait, FLOGI, NameServer login/query, authentication
 * or CLEAR_LA, and recovers discovery accordingly.  On an unrecoverable
 * CLEAR_LA error (clrlaerr) the node lists are flushed, the stopped rings
 * released, and the HBA forced to READY.
 */
static void
lpfc_disc_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
	int rc, clrlaerr = 0;

	if (unlikely(!phba))
		return;

	/* Ignore stale wakeups: only act while the discovery timer is armed */
	if (!(phba->fc_flag & FC_DISC_TMO))
		return;

	psli = &phba->sli;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	switch (phba->hba_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
		/* FAN timeout */
		lpfc_printf_log(phba,
				 KERN_WARNING,
				 LOG_DISCOVERY,
				 "%d:0221 FAN timeout\n",
				 phba->brd_no);

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
					nlp_listp) {
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(phba, ndlp);
			}
		}
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		break;

	case LPFC_FLOGI:
	/* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_log(phba,
				 KERN_ERR,
				 LOG_DISCOVERY,
				 "%d:0222 Initial FLOGI timeout\n",
				 phba->brd_no);

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0223 Timeout while waiting for NameServer "
				"login\n", phba->brd_no);

		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
		if (ndlp)
			lpfc_nlp_remove(phba, ndlp);
		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0224 NameServer Query timeout "
				"Data: x%x x%x\n",
				phba->brd_no,
				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
								NameServer_DID);
		if (ndlp) {
			if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
				/* Try it one more time */
				rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
				if (rc == 0)
					break;
			}
			phba->fc_ns_retry = 0;
		}

		/* Nothing to authenticate, so CLEAR_LA right now */
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			/* NOTE(review): message id 0226 is reused by three
			 * different failure sites in this function.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
			break;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		/* Bounce the link to restart discovery from scratch */
		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_log(phba,
				 KERN_ERR,
				 LOG_DISCOVERY,
				 "%d:0227 Node Authentication timeout\n",
				 phba->brd_no);
		lpfc_disc_flush_list(phba);
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}
		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
		}
		break;

	case LPFC_CLEAR_LA:
	/* CLEAR LA timeout */
		lpfc_printf_log(phba,
				 KERN_ERR,
				 LOG_DISCOVERY,
				 "%d:0228 CLEAR LA timeout\n",
				 phba->brd_no);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		if (phba->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_DISCOVERY,
					"%d:0231 RSCN timeout Data: x%x x%x\n",
					phba->brd_no,
					phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(phba);

			lpfc_els_flush_rscn(phba);
			lpfc_disc_flush_list(phba);
		}
		break;
	}

	if (clrlaerr) {
		/* CLEAR_LA failed: give up on discovery, release the rings
		 * stopped by MBX_STOP_IOCB and declare the HBA ready.
		 */
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}

	return;
}
2408
2409static void
2410lpfc_nodev_timeout(unsigned long ptr)
2411{
2412 struct lpfc_hba *phba;
2413 struct lpfc_nodelist *ndlp;
2414 unsigned long iflag;
2415 struct lpfc_work_evt *evtp;
2416
2417 ndlp = (struct lpfc_nodelist *)ptr;
2418 phba = ndlp->nlp_phba;
2419 evtp = &ndlp->nodev_timeout_evt;
2420 spin_lock_irqsave(phba->host->host_lock, iflag);
2421
2422 if (!list_empty(&evtp->evt_listp)) {
2423 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2424 return;
2425 }
2426 evtp->evt_arg1 = ndlp;
2427 evtp->evt = LPFC_EVT_NODEV_TMO;
2428 list_add_tail(&evtp->evt_listp, &phba->work_list);
2429 if (phba->work_wait)
2430 wake_up(phba->work_wait);
2431
2432 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2433 return;
2434}
2435
2436
2437/*
2438 * This routine handles processing a NameServer REG_LOGIN mailbox
2439 * command upon completion. It is setup in the LPFC_MBOXQ
2440 * as the completion routine when the command is
2441 * handed off to the SLI layer.
2442 */
2443void
2444lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2445{
2446 struct lpfc_sli *psli;
2447 MAILBOX_t *mb;
2448 struct lpfc_dmabuf *mp;
2449 struct lpfc_nodelist *ndlp;
2450
2451 psli = &phba->sli;
2452 mb = &pmb->mb;
2453
2454 ndlp = (struct lpfc_nodelist *) pmb->context2;
2455 mp = (struct lpfc_dmabuf *) (pmb->context1);
2456
2457 pmb->context1 = NULL;
2458
dea31012005-04-17 16:05:31 -05002459 ndlp->nlp_rpi = mb->un.varWords[0];
dea31012005-04-17 16:05:31 -05002460 ndlp->nlp_type |= NLP_FABRIC;
2461 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
2462 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
2463
2464 /* Start issuing Fabric-Device Management Interface (FDMI)
2465 * command to 0xfffffa (FDMI well known port)
2466 */
2467 if (phba->cfg_fdmi_on == 1) {
2468 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
2469 } else {
2470 /*
2471 * Delay issuing FDMI command if fdmi-on=2
2472 * (supporting RPA/hostnmae)
2473 */
2474 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2475 }
2476
2477 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2478 kfree(mp);
2479 mempool_free( pmb, phba->mbox_mem_pool);
2480
2481 return;
2482}
2483
2484/*
James.Smart@Emulex.Com21568f52005-10-28 20:29:36 -04002485 * This routine looks up the ndlp lists
2486 * for the given RPI. If rpi found
dea31012005-04-17 16:05:31 -05002487 * it return the node list pointer
James.Smart@Emulex.Com21568f52005-10-28 20:29:36 -04002488 * else return NULL.
dea31012005-04-17 16:05:31 -05002489 */
2490struct lpfc_nodelist *
2491lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
2492{
James.Smart@Emulex.Com21568f52005-10-28 20:29:36 -04002493 struct lpfc_nodelist *ndlp;
2494 struct list_head * lists[]={&phba->fc_nlpunmap_list,
2495 &phba->fc_nlpmap_list,
2496 &phba->fc_plogi_list,
2497 &phba->fc_adisc_list,
2498 &phba->fc_reglogin_list};
2499 int i;
dea31012005-04-17 16:05:31 -05002500
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05002501 spin_lock_irq(phba->host->host_lock);
James.Smart@Emulex.Com21568f52005-10-28 20:29:36 -04002502 for (i = 0; i < ARRAY_SIZE(lists); i++ )
2503 list_for_each_entry(ndlp, lists[i], nlp_listp)
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05002504 if (ndlp->nlp_rpi == rpi) {
2505 spin_unlock_irq(phba->host->host_lock);
Jamie Wellnitz2fe165b2006-02-28 19:25:31 -05002506 return ndlp;
Jamie Wellnitz66a9ed62006-02-28 22:33:10 -05002507 }
2508 spin_unlock_irq(phba->host->host_lock);
James.Smart@Emulex.Com21568f52005-10-28 20:29:36 -04002509 return NULL;
dea31012005-04-17 16:05:31 -05002510}
2511
James Smart488d1462006-03-07 15:02:37 -05002512/*
2513 * This routine looks up the ndlp lists
2514 * for the given WWPN. If WWPN found
2515 * it return the node list pointer
2516 * else return NULL.
2517 */
2518struct lpfc_nodelist *
2519lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
2520 struct lpfc_name * wwpn)
2521{
2522 struct lpfc_nodelist *ndlp;
2523 struct list_head * lists[]={&phba->fc_nlpunmap_list,
2524 &phba->fc_nlpmap_list,
2525 &phba->fc_npr_list,
2526 &phba->fc_plogi_list,
2527 &phba->fc_adisc_list,
2528 &phba->fc_reglogin_list,
2529 &phba->fc_prli_list};
2530 uint32_t search[]={NLP_SEARCH_UNMAPPED,
2531 NLP_SEARCH_MAPPED,
2532 NLP_SEARCH_NPR,
2533 NLP_SEARCH_PLOGI,
2534 NLP_SEARCH_ADISC,
2535 NLP_SEARCH_REGLOGIN,
2536 NLP_SEARCH_PRLI};
2537 int i;
2538
2539 spin_lock_irq(phba->host->host_lock);
2540 for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
2541 if (!(order & search[i]))
2542 continue;
2543 list_for_each_entry(ndlp, lists[i], nlp_listp) {
2544 if (memcmp(&ndlp->nlp_portname, wwpn,
2545 sizeof(struct lpfc_name)) == 0) {
2546 spin_unlock_irq(phba->host->host_lock);
2547 return ndlp;
2548 }
2549 }
2550 }
2551 spin_unlock_irq(phba->host->host_lock);
2552 return NULL;
2553}
2554
dea31012005-04-17 16:05:31 -05002555void
2556lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2557 uint32_t did)
2558{
2559 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2560 INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
2561 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2562 init_timer(&ndlp->nlp_tmofunc);
2563 ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2564 ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2565 init_timer(&ndlp->nlp_delayfunc);
2566 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2567 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2568 ndlp->nlp_DID = did;
2569 ndlp->nlp_phba = phba;
2570 ndlp->nlp_sid = NLP_NO_SID;
2571 return;
2572}