/*
2 * linux/drivers/scsi/esas2r/esas2r_int.c
3 * esas2r interrupt handling
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 */
8/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
9/*
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29 *
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42 */
43/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
44
45#include "esas2r.h"
46
47/* Local function prototypes */
48static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
49static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
50static void esas2r_process_bus_reset(struct esas2r_adapter *a);
51
/*
 * Poll the adapter for interrupts and service them.
 * This function handles both legacy interrupts and MSI.
 */
void esas2r_polled_interrupt(struct esas2r_adapter *a)
{
	u32 intstat;
	u32 doorbell;

	/* Keep the chip quiet while we service events by polling. */
	esas2r_disable_chip_interrupts(a);

	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);

	if (intstat & MU_INTSTAT_POST_OUT) {
		/* clear the interrupt */

		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		/* flush the write before walking the outbound list */
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);

		esas2r_get_outbound_responses(a);
	}

	if (intstat & MU_INTSTAT_DRBL) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	esas2r_enable_chip_interrupts(a);

	/* Deferred work may only run while interrupts are not disabled. */
	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
86
87/*
88 * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler
89 * schedules a TASKLET to process events, whereas the MSI handler just
90 * processes interrupt events directly.
91 */
92irqreturn_t esas2r_interrupt(int irq, void *dev_id)
93{
94 struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
95
96 if (!esas2r_adapter_interrupt_pending(a))
97 return IRQ_NONE;
98
99 esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
100 esas2r_schedule_tasklet(a);
101
102 return IRQ_HANDLED;
103}
104
/*
 * Service the interrupt events recorded in a->int_stat (outbound list
 * completions and doorbell events), then re-arm the chip interrupt mask.
 * Runs from tasklet context for the legacy interrupt path.
 */
void esas2r_adapter_interrupt(struct esas2r_adapter *a)
{
	u32 doorbell;

	if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
		/* clear the interrupt */
		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		/* flush the write before walking the outbound list */
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
		esas2r_get_outbound_responses(a);
	}

	if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	/* Restore the full interrupt mask before re-enabling. */
	a->int_mask = ESAS2R_INT_STS_MASK;

	esas2r_enable_chip_interrupts(a);

	if (likely(atomic_read(&a->disable_cnt) == 0))
		esas2r_do_deferred_processes(a);
}
130
131irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
132{
133 struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
134 u32 intstat;
135 u32 doorbell;
136
137 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
138
139 if (likely(intstat & MU_INTSTAT_POST_OUT)) {
140 /* clear the interrupt */
141
142 esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
143 MU_OLIS_INT);
144 esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
145
146 esas2r_get_outbound_responses(a);
147 }
148
149 if (unlikely(intstat & MU_INTSTAT_DRBL)) {
150 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
151 if (doorbell != 0)
152 esas2r_doorbell_interrupt(a, doorbell);
153 }
154
155 /*
156 * Work around a chip bug and force a new MSI to be sent if one is
157 * still pending.
158 */
159 esas2r_disable_chip_interrupts(a);
160 esas2r_enable_chip_interrupts(a);
161
162 if (likely(atomic_read(&a->disable_cnt) == 0))
163 esas2r_do_deferred_processes(a);
164
165 esas2r_do_tasklet_tasks(a);
166
167 return 1;
168}
169
170
171
172static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
173 struct esas2r_request *rq,
174 struct atto_vda_ob_rsp *rsp)
175{
176
177 /*
178 * For I/O requests, only copy the response if an error
179 * occurred and setup a callback to do error processing.
180 */
181 if (unlikely(rq->req_stat != RS_SUCCESS)) {
182 memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
183
184 if (rq->req_stat == RS_ABORTED) {
185 if (rq->timeout > RQ_MAX_TIMEOUT)
186 rq->req_stat = RS_TIMEOUT;
187 } else if (rq->req_stat == RS_SCSI_ERROR) {
188 u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
189
190 esas2r_trace("scsistatus: %x", scsistatus);
191
192 /* Any of these are a good result. */
193 if (scsistatus == SAM_STAT_GOOD || scsistatus ==
194 SAM_STAT_CONDITION_MET || scsistatus ==
195 SAM_STAT_INTERMEDIATE || scsistatus ==
196 SAM_STAT_INTERMEDIATE_CONDITION_MET) {
197 rq->req_stat = RS_SUCCESS;
198 rq->func_rsp.scsi_rsp.scsi_stat =
199 SAM_STAT_GOOD;
200 }
201 }
202 }
203}
204
/*
 * Drain the adapter's outbound response list.  For each new response,
 * look up the owning request by handle, record its completion status,
 * and queue it on a local list; the completions are run after the
 * queue lock is released.
 */
static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
{
	struct atto_vda_ob_rsp *rsp;
	u32 rspput_ptr;
	u32 rspget_ptr;
	struct esas2r_request *rq;
	u32 handle;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* Get the outbound limit and pointers */
	rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
	rspget_ptr = a->last_read;

	esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);

	/* If we don't have anything to process, get out */
	if (unlikely(rspget_ptr == rspput_ptr)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_trace_exit();
		return;
	}

	/*
	 * Make sure the firmware is healthy: a write pointer outside the
	 * list indicates corrupt firmware state, so reset the adapter.
	 */
	if (unlikely(rspput_ptr >= a->list_size)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_bugon();
		esas2r_local_reset_adapter(a);
		esas2r_trace_exit();
		return;
	}

	do {
		/* Advance the get pointer, wrapping at the end of the list. */
		rspget_ptr++;

		if (rspget_ptr >= a->list_size)
			rspget_ptr = 0;

		rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
		      + rspget_ptr;

		handle = rsp->handle;

		/* Verify the handle range; out-of-range entries are skipped */
		if (unlikely(LOWORD(handle) == 0
			     || LOWORD(handle) > num_requests +
			     num_ae_requests + 1)) {
			esas2r_bugon();
			continue;
		}

		/* Get the request for this handle */
		rq = a->req_table[LOWORD(handle)];

		/* Skip stale responses whose handle no longer matches */
		if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
			esas2r_bugon();
			continue;
		}

		list_del(&rq->req_list);

		/* Get the completion status */
		rq->req_stat = rsp->req_stat;

		esas2r_trace("handle: %x", handle);
		esas2r_trace("rq: %p", rq);
		esas2r_trace("req_status: %x", rq->req_stat);

		if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
			esas2r_handle_outbound_rsp_err(a, rq, rsp);
		} else {
			/*
			 * Copy the outbound completion struct for non-I/O
			 * requests.
			 */
			memcpy(&rq->func_rsp, &rsp->func_rsp,
			       sizeof(rsp->func_rsp));
		}

		/* Queue the request for completion. */
		list_add_tail(&rq->comp_list, &comp_list);

	} while (rspget_ptr != rspput_ptr);

	a->last_read = rspget_ptr;
	spin_unlock_irqrestore(&a->queue_lock, flags);

	/* Run completion callbacks outside the queue lock. */
	esas2r_comp_list_drain(a, &comp_list);
	esas2r_trace_exit();
}
300
/*
 * Perform all deferred processes for the adapter.  Deferred
 * processes can only be done while the current interrupt
 * disable_cnt for the adapter is zero.
 */
void esas2r_do_deferred_processes(struct esas2r_adapter *a)
{
	int startreqs = 2;
	struct esas2r_request *rq;
	unsigned long flags;

	/*
	 * startreqs is used to control starting requests
	 * that are on the deferred queue
	 *  = 0 - do not start any requests
	 *  = 1 - can start discovery requests
	 *  = 2 - can start any request
	 */

	if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
		startreqs = 0;
	else if (a->flags & AF_DISC_PENDING)
		startreqs = 1;

	/* Block re-entry while we work through the deferred queue. */
	atomic_inc(&a->disable_cnt);

	/*
	 * If the tasklet still has work pending, let it run first and do
	 * not start anything from here.
	 */
	if (esas2r_is_tasklet_pending(a)) {
		esas2r_schedule_tasklet(a);

		startreqs = 0;
	}

	/*
	 * If we can start requests then traverse the defer queue
	 * looking for requests to start or complete
	 */
	if (startreqs && !list_empty(&a->defer_list)) {
		LIST_HEAD(comp_list);
		struct list_head *element, *next;

		spin_lock_irqsave(&a->queue_lock, flags);

		list_for_each_safe(element, next, &a->defer_list) {
			rq = list_entry(element, struct esas2r_request,
					req_list);

			if (rq->req_stat != RS_PENDING) {
				/* Already finished; just complete it. */
				list_del(element);
				list_add_tail(&rq->comp_list, &comp_list);
			}
			/*
			 * Process discovery and OS requests separately.  We
			 * can't hold up discovery requests when discovery is
			 * pending.  In general, there may be different sets of
			 * conditions for starting different types of requests.
			 */
			else if (rq->req_type == RT_DISC_REQ) {
				list_del(element);
				esas2r_disc_local_start_request(a, rq);
			} else if (startreqs == 2) {
				list_del(element);
				esas2r_local_start_request(a, rq);

				/*
				 * Flashing could have been set by last local
				 * start
				 */
				if (a->flags & AF_FLASHING)
					break;
			}
		}

		spin_unlock_irqrestore(&a->queue_lock, flags);
		/* Run completion callbacks outside the queue lock. */
		esas2r_comp_list_drain(a, &comp_list);
	}

	atomic_dec(&a->disable_cnt);
}
381
/*
 * Process an adapter reset (or one that is about to happen)
 * by making sure all outstanding requests are completed that
 * haven't been already.
 */
void esas2r_process_adapter_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	unsigned long flags;
	struct esas2r_disc_context *dc;

	LIST_HEAD(comp_list);
	struct list_head *element;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* abort the active discovery, if any. */

	if (rq->interrupt_cx) {
		dc = (struct esas2r_disc_context *)rq->interrupt_cx;

		dc->disc_evt = 0;

		esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
	}

	/*
	 * just clear the interrupt callback for now.  it will be dequeued if
	 * and when we find it on the active queue and we don't want the
	 * callback called.  also set the dummy completion callback in case we
	 * were doing an I/O request.
	 */

	rq->interrupt_cx = NULL;
	rq->interrupt_cb = NULL;

	rq->comp_cb = esas2r_dummy_complete;

	/* Reset the read and write pointers */

	*a->outbound_copy =
		a->last_write =
		a->last_read = a->list_size - 1;

	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);

	/*
	 * Kill all the requests on the active list.
	 * NOTE(review): this walks a->defer_list although the comment says
	 * "active list" — confirm which queue holds RS_STARTED requests.
	 */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);

		if (rq->req_stat == RS_STARTED)
			if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
				list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
	esas2r_comp_list_drain(a, &comp_list);
	esas2r_process_bus_reset(a);
	esas2r_trace_exit();
}
444
/*
 * Handle a bus reset: abort everything on the deferred queue, run the
 * resulting completions, and clear the OS-initiated reset flag.
 */
static void esas2r_process_bus_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq;
	struct list_head *element;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	esas2r_hdebug("reset detected");

	spin_lock_irqsave(&a->queue_lock, flags);

	/* kill all the requests on the deferred queue */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);
		if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
			list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);

	/* Run the abort completions outside the queue lock. */
	esas2r_comp_list_drain(a, &comp_list);

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);

	esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);

	esas2r_trace_exit();
}
477
/*
 * Handle AF_CHPRST_NEEDED from the tasklet: either permanently disable a
 * failing adapter or reset the chip and kick off its reinitialization.
 */
static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
{

	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
	/*
	 * Make sure we don't get attempt more than 3 resets
	 * when the uptime between resets does not exceed one
	 * minute.  This will stop any situation where there is
	 * really something wrong with the hardware.  The way
	 * this works is that we start with uptime ticks at 0.
	 * Each time we do a reset, we add 20 seconds worth to
	 * the count.  Each time a timer tick occurs, as long
	 * as a chip reset is not pending, we decrement the
	 * tick count.  If the uptime ticks ever gets to 60
	 * seconds worth, we disable the adapter from that
	 * point forward.  Three strikes, you're out.
	 */
	if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
					      ESAS2R_CHP_UPTIME_MAX)) {
		esas2r_hdebug("*** adapter disabled ***");

		/*
		 * Ok, some kind of hard failure.  Make sure we
		 * exit this loop with chip interrupts
		 * permanently disabled so we don't lock up the
		 * entire system.  Also flag degraded mode to
		 * prevent the heartbeat from trying to recover.
		 */

		esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
		esas2r_lock_set_flags(&a->flags, AF_DISABLED);
		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);

		esas2r_disable_chip_interrupts(a);
		a->int_mask = 0;
		esas2r_process_adapter_reset(a);

		esas2r_log(ESAS2R_LOG_CRIT,
			   "Adapter disabled because of hardware failure");
	} else {
		/*
		 * NOTE(review): the return of esas2r_lock_set_flags() is
		 * treated below as the flag word from BEFORE the update
		 * (AF_CHPRST_STARTED clear => first attempt) — confirm
		 * against its definition.
		 */
		u32 flags =
			esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);

		if (!(flags & AF_CHPRST_STARTED))
			/*
			 * Only disable interrupts if this is
			 * the first reset attempt.
			 */
			esas2r_disable_chip_interrupts(a);

		if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
		    !(flags & AF_CHPRST_STARTED)) {
			/*
			 * Don't reset the chip on the first
			 * deferred power up attempt.
			 */
		} else {
			esas2r_hdebug("*** resetting chip ***");
			esas2r_reset_chip(a);
		}

		/* Kick off the reinitialization */
		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
		a->chip_init_time = jiffies_to_msecs(jiffies);
		if (!(a->flags & AF_POWER_MGT)) {
			esas2r_process_adapter_reset(a);

			if (!(flags & AF_CHPRST_STARTED)) {
				/* Remove devices now that I/O is cleaned up. */
				a->prev_dev_cnt =
					esas2r_targ_db_get_tgt_cnt(a);
				esas2r_targ_db_remove_all(a, false);
			}
		}

		a->int_mask = 0;
	}
}
560
/*
 * Handle AF_CHPRST_DETECTED from the tasklet: bring the chip back up
 * after a reset and notify the firmware layer of the recovery.
 */
static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
{
	while (a->flags & AF_CHPRST_DETECTED) {
		/*
		 * Balance the enable in esas2r_initadapter_hw.
		 * Esas2r_power_down already took care of it for power
		 * management.
		 */
		if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
							AF_POWER_MGT))
			esas2r_disable_chip_interrupts(a);

		/* Reinitialize the chip. */
		esas2r_check_adapter(a);
		esas2r_init_adapter_hw(a, 0);

		/* Another reset is needed; give up on this recovery pass. */
		if (a->flags & AF_CHPRST_NEEDED)
			break;

		if (a->flags & AF_POWER_MGT) {
			/* Recovery from power management. */
			if (a->flags & AF_FIRST_INIT) {
				/* Chip reset during normal power up */
				esas2r_log(ESAS2R_LOG_CRIT,
					   "The firmware was reset during a normal power-up sequence");
			} else {
				/* Deferred power up complete. */
				esas2r_lock_clear_flags(&a->flags,
							AF_POWER_MGT);
				esas2r_send_reset_ae(a, true);
			}
		} else {
			/* Recovery from online chip reset. */
			if (a->flags & AF_FIRST_INIT) {
				/* Chip reset during driver load */
			} else {
				/* Chip reset after driver load */
				esas2r_send_reset_ae(a, false);
			}

			esas2r_log(ESAS2R_LOG_CRIT,
				   "Recovering from a chip reset while the chip was online");
		}

		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
		esas2r_enable_chip_interrupts(a);

		/*
		 * Clear this flag last!  this indicates that the chip has been
		 * reset already during initialization.
		 */
		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
	}
}
615
616
/* Perform deferred tasks when chip interrupts are disabled */
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
{
	/* Chip reset work is handled before any bus reset work. */
	if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
		if (a->flags & AF_CHPRST_NEEDED)
			esas2r_chip_rst_needed_during_tasklet(a);

		esas2r_handle_chip_rst_during_tasklet(a);
	}

	if (a->flags & AF_BUSRST_NEEDED) {
		esas2r_hdebug("hard resetting bus");

		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);

		/*
		 * When flashing, just mark the reset as detected instead
		 * of writing the reset doorbell to the firmware.
		 */
		if (a->flags & AF_FLASHING)
			esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
		else
			esas2r_write_register_dword(a, MU_DOORBELL_IN,
						    DRBL_RESET_BUS);
	}

	if (a->flags & AF_BUSRST_DETECTED) {
		esas2r_process_bus_reset(a);

		esas2r_log_dev(ESAS2R_LOG_WARN,
			       &(a->host->shost_gendev),
			       "scsi_report_bus_reset() called");

		scsi_report_bus_reset(a->host, 0);

		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);

		esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
	}

	if (a->flags & AF_PORT_CHANGE) {
		esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);

		esas2r_targ_db_report_changes(a);
	}

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
663
/*
 * Decode and acknowledge an outbound doorbell event (bus reset request,
 * forced interrupt / heartbeat, firmware panic, firmware reset).
 */
static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
{
	/* Forced interrupts (heartbeat) are not traced. */
	if (!(doorbell & DRBL_FORCE_INT)) {
		esas2r_trace_enter();
		esas2r_trace("doorbell: %x", doorbell);
	}

	/* First clear the doorbell bits */
	esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);

	if (doorbell & DRBL_RESET_BUS)
		esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);

	/* A forced interrupt answers the heartbeat check. */
	if (doorbell & DRBL_FORCE_INT)
		esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);

	if (doorbell & DRBL_PANIC_REASON_MASK) {
		esas2r_hdebug("*** Firmware Panic ***");
		esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
	}

	if (doorbell & DRBL_FW_RESET) {
		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
		esas2r_local_reset_adapter(a);
	}

	if (!(doorbell & DRBL_FORCE_INT))
		esas2r_trace_exit();
}
693
694void esas2r_force_interrupt(struct esas2r_adapter *a)
695{
696 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
697 DRBL_DRV_VER);
698}
699
700
701static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
702 u16 target, u32 length)
703{
704 struct esas2r_target *t = a->targetdb + target;
705 u32 cplen = length;
706 unsigned long flags;
707
708 if (cplen > sizeof(t->lu_event))
709 cplen = sizeof(t->lu_event);
710
711 esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
712 esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
713
714 spin_lock_irqsave(&a->mem_lock, flags);
715
716 t->new_target_state = TS_INVALID;
717
718 if (ae->lu.dwevent & VDAAE_LU_LOST) {
719 t->new_target_state = TS_NOT_PRESENT;
720 } else {
721 switch (ae->lu.bystate) {
722 case VDAAE_LU_NOT_PRESENT:
723 case VDAAE_LU_OFFLINE:
724 case VDAAE_LU_DELETED:
725 case VDAAE_LU_FACTORY_DISABLED:
726 t->new_target_state = TS_NOT_PRESENT;
727 break;
728
729 case VDAAE_LU_ONLINE:
730 case VDAAE_LU_DEGRADED:
731 t->new_target_state = TS_PRESENT;
732 break;
733 }
734 }
735
736 if (t->new_target_state != TS_INVALID) {
737 memcpy(&t->lu_event, &ae->lu, cplen);
738
739 esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
740 }
741
742 spin_unlock_irqrestore(&a->mem_lock, flags);
743}
744
745
746
/*
 * Process the asynchronous event (AE) data returned by the firmware:
 * walk each variable-length event in the response buffer, byte-swap it,
 * forward it to the application event queue, and act on RAID/LUN/disk
 * events.  The AE request is re-queued to the firmware when done.
 */
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	union atto_vda_ae *ae =
		(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
	u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
	union atto_vda_ae *last =
		(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
				      + length);

	esas2r_trace_enter();
	esas2r_trace("length: %d", length);

	/* Sanity-check the overall response length before walking it. */
	if (length > sizeof(struct atto_vda_ae_data)
	    || (length & 3) != 0
	    || length == 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "The AE request response length (%p) is too long: %d",
			   rq, length);

		esas2r_hdebug("aereq->length (0x%x) too long", length);
		esas2r_bugon();

		/* Setting last == ae makes the walk below a no-op. */
		last = ae;
	}

	while (ae < last) {
		u16 target;

		esas2r_trace("ae: %p", ae);
		esas2r_trace("ae->hdr: %p", &(ae->hdr));

		length = ae->hdr.bylength;

		/* Validate each event's own length field as well. */
		if (length > (u32)((u8 *)last - (u8 *)ae)
		    || (length & 3) != 0
		    || length == 0) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "the async event length is invalid (%p): %d",
				   ae, length);

			esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
			esas2r_bugon();

			break;
		}

		esas2r_nuxi_ae_data(ae);

		esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
				      sizeof(union atto_vda_ae));

		switch (ae->hdr.bytype) {
		case VDAAE_HDR_TYPE_RAID:

			if (ae->raid.dwflags & (VDAAE_GROUP_STATE
						| VDAAE_RBLD_STATE
						| VDAAE_MEMBER_CHG
						| VDAAE_PART_CHG)) {
				esas2r_log(ESAS2R_LOG_INFO,
					   "RAID event received - name:%s rebuild_state:%d group_state:%d",
					   ae->raid.acname,
					   ae->raid.byrebuild_state,
					   ae->raid.bygroup_state);
			}

			break;

		case VDAAE_HDR_TYPE_LU:
			esas2r_log(ESAS2R_LOG_INFO,
				   "LUN event received: event:%d target_id:%d LUN:%d state:%d",
				   ae->lu.dwevent,
				   ae->lu.id.tgtlun.wtarget_id,
				   ae->lu.id.tgtlun.bylun,
				   ae->lu.bystate);

			target = ae->lu.id.tgtlun.wtarget_id;

			if (target < ESAS2R_MAX_TARGETS)
				esas2r_lun_event(a, ae, target, length);

			break;

		case VDAAE_HDR_TYPE_DISK:
			esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
			break;

		default:

			/* Silently ignore the rest and let the apps deal with
			 * them.
			 */

			break;
		}

		/* Advance to the next variable-length event. */
		ae = (union atto_vda_ae *)((u8 *)ae + length);
	}

	/* Now requeue it. */
	esas2r_start_ae_request(a, rq);
	esas2r_trace_exit();
}
849
850/* Send an asynchronous event for a chip reset or power management. */
851void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
852{
853 struct atto_vda_ae_hdr ae;
854
855 if (pwr_mgt)
856 ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
857 else
858 ae.bytype = VDAAE_HDR_TYPE_RESET;
859
860 ae.byversion = VDAAE_HDR_VER_0;
861 ae.byflags = 0;
862 ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
863
864 if (pwr_mgt)
865 esas2r_hdebug("*** sending power management AE ***");
866 else
867 esas2r_hdebug("*** sending reset AE ***");
868
869 esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
870 sizeof(union atto_vda_ae));
871}
872
/* No-op completion callback, installed where a completion must be ignored. */
void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{}
875
/*
 * Copy sense data returned for a failed SCSI request into the caller's
 * sense buffer and watch for a REPORTED LUNS DATA HAS CHANGED code.
 */
static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
				       struct esas2r_request *rq)
{
	u8 snslen, snslen2;

	/* snslen is clamped to the caller's buffer; snslen2 keeps the
	 * full reported length. */
	snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;

	if (snslen > rq->sense_len)
		snslen = rq->sense_len;

	if (snslen) {
		if (rq->sense_buf)
			memcpy(rq->sense_buf, rq->data_buf, snslen);
		else
			rq->sense_buf = (u8 *)rq->data_buf;

		/* See about possible sense data */
		if (snslen2 > 0x0c) {
			u8 *s = (u8 *)rq->data_buf;

			esas2r_trace_enter();

			/*
			 * Bytes 0x0c/0x0d are the additional sense code and
			 * qualifier; 3F/0E is REPORTED LUNS DATA HAS CHANGED.
			 */
			if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
				esas2r_trace("rq->target_id: %d",
					     rq->target_id);
				esas2r_target_state_changed(a, rq->target_id,
							    TS_LUN_CHANGE);
			}

			esas2r_trace("add_sense_key=%x", s[0x0c]);
			esas2r_trace("add_sense_qual=%x", s[0x0d]);
			esas2r_trace_exit();
		}
	}

	rq->sense_len = snslen;
}
914
915
/*
 * Final completion processing for a request: run any special-purpose
 * interrupt callback first (which may restart the request), capture
 * sense data for failed SCSI commands, then invoke the completion
 * callback.
 */
void esas2r_complete_request(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	/* A committed flash operation is done; clear the flashing state. */
	if (rq->vrq->scsi.function == VDA_FUNC_FLASH
	    && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
		esas2r_lock_clear_flags(&a->flags, AF_FLASHING);

	/* See if we setup a callback to do special processing */

	if (rq->interrupt_cb) {
		(*rq->interrupt_cb)(a, rq);

		/* The callback re-issued the request; do not complete it. */
		if (rq->req_stat == RS_PENDING) {
			esas2r_start_request(a, rq);
			return;
		}
	}

	if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
	    && unlikely(rq->req_stat != RS_SUCCESS)) {
		esas2r_check_req_rsp_sense(a, rq);
		esas2r_log_request_failure(a, rq);
	}

	(*rq->comp_cb)(a, rq);
}