1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6 * Copyright (C) 2012-2014 LSI Corporation
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43 * USA.
44 */
45
46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/kdev_t.h>
54#include <linux/blkdev.h>
55#include <linux/delay.h>
56#include <linux/interrupt.h>
57#include <linux/dma-mapping.h>
58#include <linux/io.h>
59#include <linux/time.h>
60#include <linux/kthread.h>
61#include <linux/aer.h>
62
63
64#include "mpt3sas_base.h"
65
66static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
67
68
69#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
70
71 /* maximum controller queue depth */
72#define MAX_HBA_QUEUE_DEPTH 30000
73#define MAX_CHAIN_DEPTH 100000
74static int max_queue_depth = -1;
75module_param(max_queue_depth, int, 0);
76MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
77
78static int max_sgl_entries = -1;
79module_param(max_sgl_entries, int, 0);
80MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
81
82static int msix_disable = -1;
83module_param(msix_disable, int, 0);
84MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
85
86static int max_msix_vectors = -1;
87module_param(max_msix_vectors, int, 0);
88MODULE_PARM_DESC(max_msix_vectors,
89 " max msix vectors");
90
91static int mpt3sas_fwfault_debug;
92MODULE_PARM_DESC(mpt3sas_fwfault_debug,
93 " enable detection of firmware fault and halt firmware - (default=0)");
94
95static int
96_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
97
98/**
99 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
100 *
101 */
102static int
103_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
104{
105 int ret = param_set_int(val, kp);
106 struct MPT3SAS_ADAPTER *ioc;
107
108 if (ret)
109 return ret;
110
111 /* global ioc spinlock to protect controller list on list operations */
112 pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
113 spin_lock(&gioc_lock);
114 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
115 ioc->fwfault_debug = mpt3sas_fwfault_debug;
116 spin_unlock(&gioc_lock);
117 return 0;
118}
119module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
120 param_get_int, &mpt3sas_fwfault_debug, 0644);
121
122/**
123 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
124 * @arg: input argument, used to derive ioc
125 *
126 * Return 0 if controller is removed from pci subsystem.
127 * Return -1 for any other case.
128 */
129static int mpt3sas_remove_dead_ioc_func(void *arg)
130{
131 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
132 struct pci_dev *pdev;
133
134 if ((ioc == NULL))
135 return -1;
136
137 pdev = ioc->pdev;
138 if ((pdev == NULL))
139 return -1;
140 pci_stop_and_remove_bus_device_locked(pdev);
141 return 0;
142}
143
144/**
145 * _base_fault_reset_work - workq handling ioc fault conditions
146 * @work: input argument, used to derive ioc
147 * Context: sleep.
148 *
149 * Return nothing.
150 */
151static void
152_base_fault_reset_work(struct work_struct *work)
153{
154 struct MPT3SAS_ADAPTER *ioc =
155 container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
156 unsigned long flags;
157 u32 doorbell;
158 int rc;
159 struct task_struct *p;
160
161
162 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
163 if (ioc->shost_recovery || ioc->pci_error_recovery)
164 goto rearm_timer;
165 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
166
167 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
168 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
169 pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
170 ioc->name);
171
172 /* It may be possible that EEH recovery can resolve some of
173 * the pci bus failure issues rather than removing the dead
174 * ioc function by considering the controller to be in a
175 * non-operational state. So here priority is given to EEH
176 * recovery. If it does not resolve the issue, the mpt3sas
177 * driver will consider the controller non-operational and
178 * remove the dead ioc function.
179 */
180 if (ioc->non_operational_loop++ < 5) {
181 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
182 flags);
183 goto rearm_timer;
184 }
185
186 /*
187 * Call _scsih_flush_pending_cmds callback so that we flush all
188 * pending commands back to the OS. This call is required to avoid
189 * a deadlock at the block layer. A dead IOC will fail to do a diag reset,
190 * and this call is safe since dead ioc will never return any
191 * command back from HW.
192 */
193 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
194 /*
195 * Set remove_host flag early since kernel thread will
196 * take some time to execute.
197 */
198 ioc->remove_host = 1;
199 /*Remove the Dead Host */
200 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
201 "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
202 if (IS_ERR(p))
203 pr_err(MPT3SAS_FMT
204 "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
205 ioc->name, __func__);
206 else
207 pr_err(MPT3SAS_FMT
208 "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
209 ioc->name, __func__);
210 return; /* don't rearm timer */
211 }
212
213 ioc->non_operational_loop = 0;
214
215 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
216 rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
217 FORCE_BIG_HAMMER);
218 pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
219 __func__, (rc == 0) ? "success" : "failed");
220 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
221 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
222 mpt3sas_base_fault_info(ioc, doorbell &
223 MPI2_DOORBELL_DATA_MASK);
224 if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
225 MPI2_IOC_STATE_OPERATIONAL)
226 return; /* don't rearm timer */
227 }
228
229 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
230 rearm_timer:
231 if (ioc->fault_reset_work_q)
232 queue_delayed_work(ioc->fault_reset_work_q,
233 &ioc->fault_reset_work,
234 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
235 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
236}
237
238/**
239 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
240 * @ioc: per adapter object
241 * Context: sleep.
242 *
243 * Return nothing.
244 */
245void
246mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
247{
248 unsigned long flags;
249
250 if (ioc->fault_reset_work_q)
251 return;
252
253 /* initialize fault polling */
254
255 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
256 snprintf(ioc->fault_reset_work_q_name,
257 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
258 ioc->driver_name, ioc->id);
259 ioc->fault_reset_work_q =
260 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
261 if (!ioc->fault_reset_work_q) {
262 pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
263 ioc->name, __func__, __LINE__);
264 return;
265 }
266 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
267 if (ioc->fault_reset_work_q)
268 queue_delayed_work(ioc->fault_reset_work_q,
269 &ioc->fault_reset_work,
270 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
271 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
272}
273
274/**
275 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
276 * @ioc: per adapter object
277 * Context: sleep.
278 *
279 * Return nothing.
280 */
281void
282mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
283{
284 unsigned long flags;
285 struct workqueue_struct *wq;
286
287 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
288 wq = ioc->fault_reset_work_q;
289 ioc->fault_reset_work_q = NULL;
290 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
291 if (wq) {
292 if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
293 flush_workqueue(wq);
294 destroy_workqueue(wq);
295 }
296}
297
298/**
299 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
300 * @ioc: per adapter object
301 * @fault_code: fault code
302 *
303 * Return nothing.
304 */
305void
306mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
307{
308 pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
309 ioc->name, fault_code);
310}
311
312/**
313 * mpt3sas_halt_firmware - halts mpt controller firmware
314 * @ioc: per adapter object
315 *
316 * For debugging timeout related issues. Writing 0xC0FFEE00
317 * to the doorbell register will halt controller firmware. With
318 * both the driver and the firmware stopped, the end user can
319 * obtain a ring buffer from the controller UART.
320 */
321void
322mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
323{
324 u32 doorbell;
325
326 if (!ioc->fwfault_debug)
327 return;
328
329 dump_stack();
330
331 doorbell = readl(&ioc->chip->Doorbell);
332 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
333 mpt3sas_base_fault_info(ioc , doorbell);
334 else {
335 writel(0xC0FFEE00, &ioc->chip->Doorbell);
336 pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
337 ioc->name);
338 }
339
340 if (ioc->fwfault_debug == 2)
341 for (;;)
342 ;
343 else
344 panic("panic in %s\n", __func__);
345}
346
347/**
348 * _base_sas_ioc_info - verbose translation of the ioc status
349 * @ioc: per adapter object
350 * @mpi_reply: reply mf payload returned from firmware
351 * @request_hdr: request mf
352 *
353 * Return nothing.
354 */
355static void
356_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
357 MPI2RequestHeader_t *request_hdr)
358{
359 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
360 MPI2_IOCSTATUS_MASK;
361 char *desc = NULL;
362 u16 frame_sz;
363 char *func_str = NULL;
364
365 /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
366 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
367 request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
368 request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
369 return;
370
371 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
372 return;
373
374 switch (ioc_status) {
375
376/****************************************************************************
377* Common IOCStatus values for all replies
378****************************************************************************/
379
380 case MPI2_IOCSTATUS_INVALID_FUNCTION:
381 desc = "invalid function";
382 break;
383 case MPI2_IOCSTATUS_BUSY:
384 desc = "busy";
385 break;
386 case MPI2_IOCSTATUS_INVALID_SGL:
387 desc = "invalid sgl";
388 break;
389 case MPI2_IOCSTATUS_INTERNAL_ERROR:
390 desc = "internal error";
391 break;
392 case MPI2_IOCSTATUS_INVALID_VPID:
393 desc = "invalid vpid";
394 break;
395 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
396 desc = "insufficient resources";
397 break;
398 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
399 desc = "insufficient power";
400 break;
401 case MPI2_IOCSTATUS_INVALID_FIELD:
402 desc = "invalid field";
403 break;
404 case MPI2_IOCSTATUS_INVALID_STATE:
405 desc = "invalid state";
406 break;
407 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
408 desc = "op state not supported";
409 break;
410
411/****************************************************************************
412* Config IOCStatus values
413****************************************************************************/
414
415 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
416 desc = "config invalid action";
417 break;
418 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
419 desc = "config invalid type";
420 break;
421 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
422 desc = "config invalid page";
423 break;
424 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
425 desc = "config invalid data";
426 break;
427 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
428 desc = "config no defaults";
429 break;
430 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
431 desc = "config cant commit";
432 break;
433
434/****************************************************************************
435* SCSI IO Reply
436****************************************************************************/
437
438 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
439 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
440 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
441 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
442 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
443 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
444 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
445 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
446 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
447 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
448 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
449 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
450 break;
451
452/****************************************************************************
453* For use by SCSI Initiator and SCSI Target end-to-end data protection
454****************************************************************************/
455
456 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
457 desc = "eedp guard error";
458 break;
459 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
460 desc = "eedp ref tag error";
461 break;
462 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
463 desc = "eedp app tag error";
464 break;
465
466/****************************************************************************
467* SCSI Target values
468****************************************************************************/
469
470 case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
471 desc = "target invalid io index";
472 break;
473 case MPI2_IOCSTATUS_TARGET_ABORTED:
474 desc = "target aborted";
475 break;
476 case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
477 desc = "target no conn retryable";
478 break;
479 case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
480 desc = "target no connection";
481 break;
482 case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
483 desc = "target xfer count mismatch";
484 break;
485 case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
486 desc = "target data offset error";
487 break;
488 case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
489 desc = "target too much write data";
490 break;
491 case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
492 desc = "target iu too short";
493 break;
494 case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
495 desc = "target ack nak timeout";
496 break;
497 case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
498 desc = "target nak received";
499 break;
500
501/****************************************************************************
502* Serial Attached SCSI values
503****************************************************************************/
504
505 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
506 desc = "smp request failed";
507 break;
508 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
509 desc = "smp data overrun";
510 break;
511
512/****************************************************************************
513* Diagnostic Buffer Post / Diagnostic Release values
514****************************************************************************/
515
516 case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
517 desc = "diagnostic released";
518 break;
519 default:
520 break;
521 }
522
523 if (!desc)
524 return;
525
526 switch (request_hdr->Function) {
527 case MPI2_FUNCTION_CONFIG:
528 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
529 func_str = "config_page";
530 break;
531 case MPI2_FUNCTION_SCSI_TASK_MGMT:
532 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
533 func_str = "task_mgmt";
534 break;
535 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
536 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
537 func_str = "sas_iounit_ctl";
538 break;
539 case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
540 frame_sz = sizeof(Mpi2SepRequest_t);
541 func_str = "enclosure";
542 break;
543 case MPI2_FUNCTION_IOC_INIT:
544 frame_sz = sizeof(Mpi2IOCInitRequest_t);
545 func_str = "ioc_init";
546 break;
547 case MPI2_FUNCTION_PORT_ENABLE:
548 frame_sz = sizeof(Mpi2PortEnableRequest_t);
549 func_str = "port_enable";
550 break;
551 case MPI2_FUNCTION_SMP_PASSTHROUGH:
552 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
553 func_str = "smp_passthru";
554 break;
555 default:
556 frame_sz = 32;
557 func_str = "unknown";
558 break;
559 }
560
561 pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
562 ioc->name, desc, ioc_status, request_hdr, func_str);
563
564 _debug_dump_mf(request_hdr, frame_sz/4);
565}
566
567/**
568 * _base_display_event_data - verbose translation of firmware async events
569 * @ioc: per adapter object
570 * @mpi_reply: reply mf payload returned from firmware
571 *
572 * Return nothing.
573 */
574static void
575_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
576 Mpi2EventNotificationReply_t *mpi_reply)
577{
578 char *desc = NULL;
579 u16 event;
580
581 if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
582 return;
583
584 event = le16_to_cpu(mpi_reply->Event);
585
586 switch (event) {
587 case MPI2_EVENT_LOG_DATA:
588 desc = "Log Data";
589 break;
590 case MPI2_EVENT_STATE_CHANGE:
591 desc = "Status Change";
592 break;
593 case MPI2_EVENT_HARD_RESET_RECEIVED:
594 desc = "Hard Reset Received";
595 break;
596 case MPI2_EVENT_EVENT_CHANGE:
597 desc = "Event Change";
598 break;
599 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
600 desc = "Device Status Change";
601 break;
602 case MPI2_EVENT_IR_OPERATION_STATUS:
603 if (!ioc->hide_ir_msg)
604 desc = "IR Operation Status";
605 break;
606 case MPI2_EVENT_SAS_DISCOVERY:
607 {
608 Mpi2EventDataSasDiscovery_t *event_data =
609 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
610 pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
611 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
612 "start" : "stop");
613 if (event_data->DiscoveryStatus)
614 pr_info("discovery_status(0x%08x)",
615 le32_to_cpu(event_data->DiscoveryStatus));
616 pr_info("\n");
617 return;
618 }
619 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
620 desc = "SAS Broadcast Primitive";
621 break;
622 case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
623 desc = "SAS Init Device Status Change";
624 break;
625 case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
626 desc = "SAS Init Table Overflow";
627 break;
628 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
629 desc = "SAS Topology Change List";
630 break;
631 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
632 desc = "SAS Enclosure Device Status Change";
633 break;
634 case MPI2_EVENT_IR_VOLUME:
635 if (!ioc->hide_ir_msg)
636 desc = "IR Volume";
637 break;
638 case MPI2_EVENT_IR_PHYSICAL_DISK:
639 if (!ioc->hide_ir_msg)
640 desc = "IR Physical Disk";
641 break;
642 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
643 if (!ioc->hide_ir_msg)
644 desc = "IR Configuration Change List";
645 break;
646 case MPI2_EVENT_LOG_ENTRY_ADDED:
647 if (!ioc->hide_ir_msg)
648 desc = "Log Entry Added";
649 break;
650 case MPI2_EVENT_TEMP_THRESHOLD:
651 desc = "Temperature Threshold";
652 break;
653 }
654
655 if (!desc)
656 return;
657
658 pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
659}
660
661/**
662 * _base_sas_log_info - verbose translation of firmware log info
663 * @ioc: per adapter object
664 * @log_info: log info
665 *
666 * Return nothing.
667 */
668static void
669_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
670{
671 union loginfo_type {
672 u32 loginfo;
673 struct {
674 u32 subcode:16;
675 u32 code:8;
676 u32 originator:4;
677 u32 bus_type:4;
678 } dw;
679 };
680 union loginfo_type sas_loginfo;
681 char *originator_str = NULL;
682
683 sas_loginfo.loginfo = log_info;
684 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
685 return;
686
687 /* each nexus loss loginfo */
688 if (log_info == 0x31170000)
689 return;
690
691 /* eat the loginfos associated with task aborts */
692 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
693 0x31140000 || log_info == 0x31130000))
694 return;
695
696 switch (sas_loginfo.dw.originator) {
697 case 0:
698 originator_str = "IOP";
699 break;
700 case 1:
701 originator_str = "PL";
702 break;
703 case 2:
704 if (!ioc->hide_ir_msg)
705 originator_str = "IR";
706 else
707 originator_str = "WarpDrive";
708 break;
709 }
710
711 pr_warn(MPT3SAS_FMT
712 "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
713 ioc->name, log_info,
714 originator_str, sas_loginfo.dw.code,
715 sas_loginfo.dw.subcode);
716}
717
718/**
719 * _base_display_reply_info - displays the reply's ioc status and log info
720 * @ioc: per adapter object
721 * @smid: system request message index
722 * @msix_index: MSIX table index supplied by the OS
723 * @reply: reply message frame(lower 32bit addr)
724 *
725 * Return nothing.
726 */
727static void
728_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
729 u32 reply)
730{
731 MPI2DefaultReply_t *mpi_reply;
732 u16 ioc_status;
733 u32 loginfo = 0;
734
735 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
736 if (unlikely(!mpi_reply)) {
737 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
738 ioc->name, __FILE__, __LINE__, __func__);
739 return;
740 }
741 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
742
743 if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
744 (ioc->logging_level & MPT_DEBUG_REPLY)) {
745 _base_sas_ioc_info(ioc , mpi_reply,
746 mpt3sas_base_get_msg_frame(ioc, smid));
747 }
748
749 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
750 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
751 _base_sas_log_info(ioc, loginfo);
752 }
753
754 if (ioc_status || loginfo) {
755 ioc_status &= MPI2_IOCSTATUS_MASK;
756 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
757 }
758}
759
760/**
761 * mpt3sas_base_done - base internal command completion routine
762 * @ioc: per adapter object
763 * @smid: system request message index
764 * @msix_index: MSIX table index supplied by the OS
765 * @reply: reply message frame(lower 32bit addr)
766 *
767 * Return 1 meaning mf should be freed from _base_interrupt
768 * 0 means the mf is freed from this function.
769 */
770u8
771mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
772 u32 reply)
773{
774 MPI2DefaultReply_t *mpi_reply;
775
776 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
777 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
778 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
779
780 if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
781 return 1;
782
783 ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
784 if (mpi_reply) {
785 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
786 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
787 }
788 ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
789
790 complete(&ioc->base_cmds.done);
791 return 1;
792}
793
794/**
795 * _base_async_event - main callback handler for firmware async events
796 * @ioc: per adapter object
797 * @msix_index: MSIX table index supplied by the OS
798 * @reply: reply message frame(lower 32bit addr)
799 *
800 * Return 1 meaning mf should be freed from _base_interrupt
801 * 0 means the mf is freed from this function.
802 */
803static u8
804_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
805{
806 Mpi2EventNotificationReply_t *mpi_reply;
807 Mpi2EventAckRequest_t *ack_request;
808 u16 smid;
809 struct _event_ack_list *delayed_event_ack;
810
811 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
812 if (!mpi_reply)
813 return 1;
814 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
815 return 1;
816
817 _base_display_event_data(ioc, mpi_reply);
818
819 if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
820 goto out;
821 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
822 if (!smid) {
823 delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
824 GFP_ATOMIC);
825 if (!delayed_event_ack)
826 goto out;
827 INIT_LIST_HEAD(&delayed_event_ack->list);
828 delayed_event_ack->Event = mpi_reply->Event;
829 delayed_event_ack->EventContext = mpi_reply->EventContext;
830 list_add_tail(&delayed_event_ack->list,
831 &ioc->delayed_event_ack_list);
832 dewtprintk(ioc, pr_info(MPT3SAS_FMT
833 "DELAYED: EVENT ACK: event (0x%04x)\n",
834 ioc->name, le16_to_cpu(mpi_reply->Event)));
835 goto out;
836 }
837
838 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
839 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
840 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
841 ack_request->Event = mpi_reply->Event;
842 ack_request->EventContext = mpi_reply->EventContext;
843 ack_request->VF_ID = 0; /* TODO */
844 ack_request->VP_ID = 0;
845 mpt3sas_base_put_smid_default(ioc, smid);
846
847 out:
848
849 /* scsih callback handler */
850 mpt3sas_scsih_event_callback(ioc, msix_index, reply);
851
852 /* ctl callback handler */
853 mpt3sas_ctl_event_callback(ioc, msix_index, reply);
854
855 return 1;
856}
857
858/**
859 * _base_get_cb_idx - obtain the callback index
860 * @ioc: per adapter object
861 * @smid: system request message index
862 *
863 * Return callback index.
864 */
865static u8
866_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
867{
868 int i;
869 u8 cb_idx;
870
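	/*
	 * smid pools are laid out back to back: 1..hi_priority_smid-1 index the
	 * scsi_lookup table, the next range the high-priority pool, then the
	 * internal pool; anything above hba_queue_depth has no callback (0xFF).
	 */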
871 if (smid < ioc->hi_priority_smid) {
872 i = smid - 1;
873 cb_idx = ioc->scsi_lookup[i].cb_idx;
874 } else if (smid < ioc->internal_smid) {
875 i = smid - ioc->hi_priority_smid;
876 cb_idx = ioc->hpr_lookup[i].cb_idx;
877 } else if (smid <= ioc->hba_queue_depth) {
878 i = smid - ioc->internal_smid;
879 cb_idx = ioc->internal_lookup[i].cb_idx;
880 } else
881 cb_idx = 0xFF;
882 return cb_idx;
883}
884
885/**
886 * _base_mask_interrupts - disable interrupts
887 * @ioc: per adapter object
888 *
889 * Disabling ResetIRQ, Reply and Doorbell Interrupts
890 *
891 * Return nothing.
892 */
893static void
894_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
895{
896 u32 him_register;
897
898 ioc->mask_interrupts = 1;
899 him_register = readl(&ioc->chip->HostInterruptMask);
900 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
901 writel(him_register, &ioc->chip->HostInterruptMask);
902 readl(&ioc->chip->HostInterruptMask);
903}
904
905/**
906 * _base_unmask_interrupts - enable interrupts
907 * @ioc: per adapter object
908 *
909 * Enabling only Reply Interrupts
910 *
911 * Return nothing.
912 */
913static void
914_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
915{
916 u32 him_register;
917
918 him_register = readl(&ioc->chip->HostInterruptMask);
919 him_register &= ~MPI2_HIM_RIM;
920 writel(him_register, &ioc->chip->HostInterruptMask);
921 ioc->mask_interrupts = 0;
922}
923
924union reply_descriptor {
925 u64 word;
926 struct {
927 u32 low;
928 u32 high;
929 } u;
930};
931
932/**
933 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
934 * @irq: irq number (not used)
935 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
936 * @r: pt_regs pointer (not used)
937 *
938 * Return IRQ_HANDLED if processed, else IRQ_NONE.
939 */
940static irqreturn_t
941_base_interrupt(int irq, void *bus_id)
942{
943 struct adapter_reply_queue *reply_q = bus_id;
944 union reply_descriptor rd;
945 u32 completed_cmds;
946 u8 request_desript_type;
947 u16 smid;
948 u8 cb_idx;
949 u32 reply;
950 u8 msix_index = reply_q->msix_index;
951 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
952 Mpi2ReplyDescriptorsUnion_t *rpf;
953 u8 rc;
954
955 if (ioc->mask_interrupts)
956 return IRQ_NONE;
957
958 if (!atomic_add_unless(&reply_q->busy, 1, 1))
959 return IRQ_NONE;
960
961 rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
962 request_desript_type = rpf->Default.ReplyFlags
963 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
964 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
965 atomic_dec(&reply_q->busy);
966 return IRQ_NONE;
967 }
968
969 completed_cmds = 0;
970 cb_idx = 0xFF;
971 do {
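		/*
		 * Walk the reply post free queue until an UNUSED descriptor (or an
		 * all-ones word) is reached; each completion is dispatched to the
		 * callback registered for its smid, or to the async event handler
		 * when no smid is set.
		 */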
972 rd.word = le64_to_cpu(rpf->Words);
973 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
974 goto out;
975 reply = 0;
976 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
977 if (request_desript_type ==
978 MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
979 request_desript_type ==
980 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
981 cb_idx = _base_get_cb_idx(ioc, smid);
982 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
983 (likely(mpt_callbacks[cb_idx] != NULL))) {
984 rc = mpt_callbacks[cb_idx](ioc, smid,
985 msix_index, 0);
986 if (rc)
987 mpt3sas_base_free_smid(ioc, smid);
988 }
989 } else if (request_desript_type ==
990 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
991 reply = le32_to_cpu(
992 rpf->AddressReply.ReplyFrameAddress);
993 if (reply > ioc->reply_dma_max_address ||
994 reply < ioc->reply_dma_min_address)
995 reply = 0;
996 if (smid) {
997 cb_idx = _base_get_cb_idx(ioc, smid);
998 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
999 (likely(mpt_callbacks[cb_idx] != NULL))) {
1000 rc = mpt_callbacks[cb_idx](ioc, smid,
1001 msix_index, reply);
1002 if (reply)
1003 _base_display_reply_info(ioc,
1004 smid, msix_index, reply);
1005 if (rc)
1006 mpt3sas_base_free_smid(ioc,
1007 smid);
1008 }
1009 } else {
1010 _base_async_event(ioc, msix_index, reply);
1011 }
1012
1013 /* reply free queue handling */
1014 if (reply) {
1015 ioc->reply_free_host_index =
1016 (ioc->reply_free_host_index ==
1017 (ioc->reply_free_queue_depth - 1)) ?
1018 0 : ioc->reply_free_host_index + 1;
1019 ioc->reply_free[ioc->reply_free_host_index] =
1020 cpu_to_le32(reply);
1021 wmb();
1022 writel(ioc->reply_free_host_index,
1023 &ioc->chip->ReplyFreeHostIndex);
1024 }
1025 }
1026
1027 rpf->Words = cpu_to_le64(ULLONG_MAX);
1028 reply_q->reply_post_host_index =
1029 (reply_q->reply_post_host_index ==
1030 (ioc->reply_post_queue_depth - 1)) ? 0 :
1031 reply_q->reply_post_host_index + 1;
1032 request_desript_type =
1033 reply_q->reply_post_free[reply_q->reply_post_host_index].
1034 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1035 completed_cmds++;
1036 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1037 goto out;
1038 if (!reply_q->reply_post_host_index)
1039 rpf = reply_q->reply_post_free;
1040 else
1041 rpf++;
1042 } while (1);
1043
1044 out:
1045
1046 if (!completed_cmds) {
1047 atomic_dec(&reply_q->busy);
1048 return IRQ_NONE;
1049 }
1050
1051 wmb();
1052 if (ioc->is_warpdrive) {
1053 writel(reply_q->reply_post_host_index,
1054 ioc->reply_post_host_index[msix_index]);
1055 atomic_dec(&reply_q->busy);
1056 return IRQ_HANDLED;
1057 }
1058
1059 /* Update Reply Post Host Index.
1060 * For those HBA's which support combined reply queue feature
1061 * 1. Get the correct Supplemental Reply Post Host Index Register.
1062 * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1063 * Index Register address bank i.e replyPostRegisterIndex[],
1064 * 2. Then update this register with new reply host index value
1065 * in ReplyPostIndex field and the MSIxIndex field with
1066 * msix_index value reduced to a value between 0 and 7,
1067 * using a modulo 8 operation. Since each Supplemental Reply Post
1068 * Host Index Register supports 8 MSI-X vectors.
1069 *
1070 * For other HBA's just update the Reply Post Host Index register with
1071 * new reply host index value in ReplyPostIndex Field and msix_index
1072 * value in MSIxIndex field.
1073 */
1074 if (ioc->msix96_vector)
1075 writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
1076 MPI2_RPHI_MSIX_INDEX_SHIFT),
1077 ioc->replyPostRegisterIndex[msix_index/8]);
1078 else
1079 writel(reply_q->reply_post_host_index | (msix_index <<
1080 MPI2_RPHI_MSIX_INDEX_SHIFT),
1081 &ioc->chip->ReplyPostHostIndex);
1082 atomic_dec(&reply_q->busy);
1083 return IRQ_HANDLED;
1084}
1085
1086/**
1087 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1088 * @ioc: per adapter object
1089 *
1090 */
1091static inline int
1092_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1093{
1094 return (ioc->facts.IOCCapabilities &
1095 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1096}
1097
1098/**
1099 * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
1100 * @ioc: per adapter object
1101 * Context: ISR context
1102 *
1103 * Called when a Task Management request has completed. We want
1104 * to flush the other reply queues so all the outstanding IO has been
1105 * completed back to OS before we process the TM completion.
1106 *
1107 * Return nothing.
1108 */
1109void
1110mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1111{
1112 struct adapter_reply_queue *reply_q;
1113
1114 /* If MSIX capability is turned off
1115 * then multi-queues are not enabled
1116 */
1117 if (!_base_is_controller_msix_enabled(ioc))
1118 return;
1119
1120 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1121 if (ioc->shost_recovery)
1122 return;
1123 /* TMs are on msix_index == 0 */
1124 if (reply_q->msix_index == 0)
1125 continue;
1126 _base_interrupt(reply_q->vector, (void *)reply_q);
1127 }
1128}
1129
1130/**
1131 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1132 * @cb_idx: callback index
1133 *
1134 * Return nothing.
1135 */
1136void
1137mpt3sas_base_release_callback_handler(u8 cb_idx)
1138{
1139 mpt_callbacks[cb_idx] = NULL;
1140}
1141
1142/**
1143 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1144 * @cb_func: callback function
1145 *
1146 * Returns cb_func.
1147 */
1148u8
1149mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1150{
1151 u8 cb_idx;
1152
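	/* scan from the top of the table downward; slot 0 is never handed out */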
1153 for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1154 if (mpt_callbacks[cb_idx] == NULL)
1155 break;
1156
1157 mpt_callbacks[cb_idx] = cb_func;
1158 return cb_idx;
1159}
1160
1161/**
1162 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1163 *
1164 * Return nothing.
1165 */
1166void
1167mpt3sas_base_initialize_callback_handler(void)
1168{
1169 u8 cb_idx;
1170
1171 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1172 mpt3sas_base_release_callback_handler(cb_idx);
1173}
1174
1175
1176/**
1177 * _base_build_zero_len_sge - build zero length sg entry
1178 * @ioc: per adapter object
1179 * @paddr: virtual address for SGE
1180 *
1181 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1182 * something to use if the target device goes brain dead and tries
1183 * to send data even when none is asked for.
1184 *
1185 * Return nothing.
1186 */
1187static void
1188_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1189{
1190 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1191 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1192 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1193 MPI2_SGE_FLAGS_SHIFT);
1194 ioc->base_add_sg_single(paddr, flags_length, -1);
1195}
1196
1197/**
1198 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1199 * @paddr: virtual address for SGE
1200 * @flags_length: SGE flags and data transfer length
1201 * @dma_addr: Physical address
1202 *
1203 * Return nothing.
1204 */
1205static void
1206_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1207{
1208 Mpi2SGESimple32_t *sgel = paddr;
1209
1210 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1211 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1212 sgel->FlagsLength = cpu_to_le32(flags_length);
1213 sgel->Address = cpu_to_le32(dma_addr);
1214}
1215
1216
1217/**
1218 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1219 * @paddr: virtual address for SGE
1220 * @flags_length: SGE flags and data transfer length
1221 * @dma_addr: Physical address
1222 *
1223 * Return nothing.
1224 */
1225static void
1226_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1227{
1228 Mpi2SGESimple64_t *sgel = paddr;
1229
1230 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1231 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1232 sgel->FlagsLength = cpu_to_le32(flags_length);
1233 sgel->Address = cpu_to_le64(dma_addr);
1234}
1235
1236/**
1237 * _base_get_chain_buffer_tracker - obtain chain tracker
1238 * @ioc: per adapter object
1239 * @smid: smid associated to an IO request
1240 *
1241 * Returns chain tracker(from ioc->free_chain_list)
1242 */
1243static struct chain_tracker *
1244_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1245{
1246 struct chain_tracker *chain_req;
1247 unsigned long flags;
1248
1249 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1250 if (list_empty(&ioc->free_chain_list)) {
1251 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1252 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1253 "chain buffers not available\n", ioc->name));
1254 return NULL;
1255 }
1256 chain_req = list_entry(ioc->free_chain_list.next,
1257 struct chain_tracker, tracker_list);
1258 list_del_init(&chain_req->tracker_list);
1259 list_add_tail(&chain_req->tracker_list,
1260 &ioc->scsi_lookup[smid - 1].chain_list);
1261 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1262 return chain_req;
1263}
1264
1265
1266/**
1267 * _base_build_sg - build generic sg
1268 * @ioc: per adapter object
1269 * @psge: virtual address for SGE
1270 * @data_out_dma: physical address for WRITES
1271 * @data_out_sz: data xfer size for WRITES
1272 * @data_in_dma: physical address for READS
1273 * @data_in_sz: data xfer size for READS
1274 *
1275 * Return nothing.
1276 */
1277static void
1278_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1279 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1280 size_t data_in_sz)
1281{
1282 u32 sgl_flags;
1283
1284 if (!data_out_sz && !data_in_sz) {
1285 _base_build_zero_len_sge(ioc, psge);
1286 return;
1287 }
1288
1289 if (data_out_sz && data_in_sz) {
1290 /* WRITE sgel first */
1291 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1292 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1293 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1294 ioc->base_add_sg_single(psge, sgl_flags |
1295 data_out_sz, data_out_dma);
1296
1297 /* incr sgel */
1298 psge += ioc->sge_size;
1299
1300 /* READ sgel last */
1301 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1302 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1303 MPI2_SGE_FLAGS_END_OF_LIST);
1304 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1305 ioc->base_add_sg_single(psge, sgl_flags |
1306 data_in_sz, data_in_dma);
1307 } else if (data_out_sz) /* WRITE */ {
1308 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1309 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1310 MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1311 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1312 ioc->base_add_sg_single(psge, sgl_flags |
1313 data_out_sz, data_out_dma);
1314 } else if (data_in_sz) /* READ */ {
1315 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1316 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1317 MPI2_SGE_FLAGS_END_OF_LIST);
1318 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1319 ioc->base_add_sg_single(psge, sgl_flags |
1320 data_in_sz, data_in_dma);
1321 }
1322}
1323
1324/* IEEE format sgls */
1325
1326/**
1327 * _base_add_sg_single_ieee - add sg element for IEEE format
1328 * @paddr: virtual address for SGE
1329 * @flags: SGE flags
1330 * @chain_offset: number of 128 byte elements from start of segment
1331 * @length: data transfer length
1332 * @dma_addr: Physical address
1333 *
1334 * Return nothing.
1335 */
1336static void
1337_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
1338 dma_addr_t dma_addr)
1339{
1340 Mpi25IeeeSgeChain64_t *sgel = paddr;
1341
1342 sgel->Flags = flags;
1343 sgel->NextChainOffset = chain_offset;
1344 sgel->Length = cpu_to_le32(length);
1345 sgel->Address = cpu_to_le64(dma_addr);
1346}
1347
1348/**
1349 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
1350 * @ioc: per adapter object
1351 * @paddr: virtual address for SGE
1352 *
1353 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1354 * something to use if the target device goes brain dead and tries
1355 * to send data even when none is asked for.
1356 *
1357 * Return nothing.
1358 */
1359static void
1360_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1361{
1362 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1363 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
1364 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
1365
1366 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
1367}
1368
1369/**
1370 * _base_build_sg_scmd - main sg creation routine
1371 * @ioc: per adapter object
1372 * @scmd: scsi command
1373 * @smid: system request message index
1374 * Context: none.
1375 *
1376 * The main routine that builds scatter gather table from a given
1377 * scsi request sent via the .queuecommand main handler.
1378 *
1379 * Returns 0 success, anything else error
1380 */
1381static int
1382_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1383 struct scsi_cmnd *scmd, u16 smid)
1384{
1385 Mpi2SCSIIORequest_t *mpi_request;
1386 dma_addr_t chain_dma;
1387 struct scatterlist *sg_scmd;
1388 void *sg_local, *chain;
1389 u32 chain_offset;
1390 u32 chain_length;
1391 u32 chain_flags;
1392 int sges_left;
1393 u32 sges_in_segment;
1394 u32 sgl_flags;
1395 u32 sgl_flags_last_element;
1396 u32 sgl_flags_end_buffer;
1397 struct chain_tracker *chain_req;
1398
1399 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1400
1401 /* init scatter gather flags */
1402 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
1403 if (scmd->sc_data_direction == DMA_TO_DEVICE)
1404 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
1405 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
1406 << MPI2_SGE_FLAGS_SHIFT;
1407 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
1408 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
1409 << MPI2_SGE_FLAGS_SHIFT;
1410 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1411
1412 sg_scmd = scsi_sglist(scmd);
1413 sges_left = scsi_dma_map(scmd);
1414 if (sges_left < 0) {
1415 sdev_printk(KERN_ERR, scmd->device,
1416 "pci_map_sg failed: request for %d bytes!\n",
1417 scsi_bufflen(scmd));
1418 return -ENOMEM;
1419 }
1420
1421 sg_local = &mpi_request->SGL;
1422 sges_in_segment = ioc->max_sges_in_main_message;
1423 if (sges_left <= sges_in_segment)
1424 goto fill_in_last_segment;
1425
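	/* ChainOffset is expressed in 32-bit words from the start of the request frame */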
1426 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
1427 (sges_in_segment * ioc->sge_size))/4;
1428
1429 /* fill in main message segment when there is a chain following */
1430 while (sges_in_segment) {
1431 if (sges_in_segment == 1)
1432 ioc->base_add_sg_single(sg_local,
1433 sgl_flags_last_element | sg_dma_len(sg_scmd),
1434 sg_dma_address(sg_scmd));
1435 else
1436 ioc->base_add_sg_single(sg_local, sgl_flags |
1437 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1438 sg_scmd = sg_next(sg_scmd);
1439 sg_local += ioc->sge_size;
1440 sges_left--;
1441 sges_in_segment--;
1442 }
1443
1444 /* initializing the chain flags and pointers */
1445 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
1446 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1447 if (!chain_req)
1448 return -1;
1449 chain = chain_req->chain_buffer;
1450 chain_dma = chain_req->chain_buffer_dma;
1451 do {
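		/*
		 * Each pass places a chain element (in the previous segment) that
		 * points at the next chain buffer, then fills that buffer with
		 * simple elements; a chain_offset of zero marks the last segment.
		 */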
1452 sges_in_segment = (sges_left <=
1453 ioc->max_sges_in_chain_message) ? sges_left :
1454 ioc->max_sges_in_chain_message;
1455 chain_offset = (sges_left == sges_in_segment) ?
1456 0 : (sges_in_segment * ioc->sge_size)/4;
1457 chain_length = sges_in_segment * ioc->sge_size;
1458 if (chain_offset) {
1459 chain_offset = chain_offset <<
1460 MPI2_SGE_CHAIN_OFFSET_SHIFT;
1461 chain_length += ioc->sge_size;
1462 }
1463 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
1464 chain_length, chain_dma);
1465 sg_local = chain;
1466 if (!chain_offset)
1467 goto fill_in_last_segment;
1468
1469 /* fill in chain segments */
1470 while (sges_in_segment) {
1471 if (sges_in_segment == 1)
1472 ioc->base_add_sg_single(sg_local,
1473 sgl_flags_last_element |
1474 sg_dma_len(sg_scmd),
1475 sg_dma_address(sg_scmd));
1476 else
1477 ioc->base_add_sg_single(sg_local, sgl_flags |
1478 sg_dma_len(sg_scmd),
1479 sg_dma_address(sg_scmd));
1480 sg_scmd = sg_next(sg_scmd);
1481 sg_local += ioc->sge_size;
1482 sges_left--;
1483 sges_in_segment--;
1484 }
1485
1486 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1487 if (!chain_req)
1488 return -1;
1489 chain = chain_req->chain_buffer;
1490 chain_dma = chain_req->chain_buffer_dma;
1491 } while (1);
1492
1493
1494 fill_in_last_segment:
1495
1496 /* fill the last segment */
1497 while (sges_left) {
1498 if (sges_left == 1)
1499 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
1500 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1501 else
1502 ioc->base_add_sg_single(sg_local, sgl_flags |
1503 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1504 sg_scmd = sg_next(sg_scmd);
1505 sg_local += ioc->sge_size;
1506 sges_left--;
1507 }
1508
1509 return 0;
1510}
1511
1512/**
1513 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
1514 * @ioc: per adapter object
1515 * @scmd: scsi command
1516 * @smid: system request message index
1517 * Context: none.
1518 *
1519 * The main routine that builds scatter gather table from a given
1520 * scsi request sent via the .queuecommand main handler.
1521 *
1522 * Returns 0 success, anything else error
1523 */
1524static int
1525_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1526 struct scsi_cmnd *scmd, u16 smid)
1527{
1528 Mpi2SCSIIORequest_t *mpi_request;
1529 dma_addr_t chain_dma;
1530 struct scatterlist *sg_scmd;
1531 void *sg_local, *chain;
1532 u32 chain_offset;
1533 u32 chain_length;
1534 int sges_left;
1535 u32 sges_in_segment;
1536 u8 simple_sgl_flags;
1537 u8 simple_sgl_flags_last;
1538 u8 chain_sgl_flags;
1539 struct chain_tracker *chain_req;
1540
1541 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1542
1543 /* init scatter gather flags */
1544 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1545 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1546 simple_sgl_flags_last = simple_sgl_flags |
1547 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1548 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1549 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1550
1551 sg_scmd = scsi_sglist(scmd);
1552 sges_left = scsi_dma_map(scmd);
1553 if (sges_left < 0) {
1554 sdev_printk(KERN_ERR, scmd->device,
1555 "pci_map_sg failed: request for %d bytes!\n",
1556 scsi_bufflen(scmd));
1557 return -ENOMEM;
1558 }
1559
1560 sg_local = &mpi_request->SGL;
1561 sges_in_segment = (ioc->request_sz -
1562 offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
1563 if (sges_left <= sges_in_segment)
1564 goto fill_in_last_segment;
1565
1566 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
1567 (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
1568
1569 /* fill in main message segment when there is a chain following */
1570 while (sges_in_segment > 1) {
1571 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1572 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1573 sg_scmd = sg_next(sg_scmd);
1574 sg_local += ioc->sge_size_ieee;
1575 sges_left--;
1576 sges_in_segment--;
1577 }
1578
1579 /* initializing the pointers */
1580 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1581 if (!chain_req)
1582 return -1;
1583 chain = chain_req->chain_buffer;
1584 chain_dma = chain_req->chain_buffer_dma;
1585 do {
1586 sges_in_segment = (sges_left <=
1587 ioc->max_sges_in_chain_message) ? sges_left :
1588 ioc->max_sges_in_chain_message;
1589 chain_offset = (sges_left == sges_in_segment) ?
1590 0 : sges_in_segment;
1591 chain_length = sges_in_segment * ioc->sge_size_ieee;
1592 if (chain_offset)
1593 chain_length += ioc->sge_size_ieee;
1594 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
1595 chain_offset, chain_length, chain_dma);
1596
1597 sg_local = chain;
1598 if (!chain_offset)
1599 goto fill_in_last_segment;
1600
1601 /* fill in chain segments */
1602 while (sges_in_segment) {
1603 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1604 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1605 sg_scmd = sg_next(sg_scmd);
1606 sg_local += ioc->sge_size_ieee;
1607 sges_left--;
1608 sges_in_segment--;
1609 }
1610
1611 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1612 if (!chain_req)
1613 return -1;
1614 chain = chain_req->chain_buffer;
1615 chain_dma = chain_req->chain_buffer_dma;
1616 } while (1);
1617
1618
1619 fill_in_last_segment:
1620
1621 /* fill the last segment */
1622 while (sges_left > 0) {
1623 if (sges_left == 1)
1624 _base_add_sg_single_ieee(sg_local,
1625 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
1626 sg_dma_address(sg_scmd));
1627 else
1628 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1629 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1630 sg_scmd = sg_next(sg_scmd);
1631 sg_local += ioc->sge_size_ieee;
1632 sges_left--;
1633 }
1634
1635 return 0;
1636}
1637
1638/**
1639 * _base_build_sg_ieee - build generic sg for IEEE format
1640 * @ioc: per adapter object
1641 * @psge: virtual address for SGE
1642 * @data_out_dma: physical address for WRITES
1643 * @data_out_sz: data xfer size for WRITES
1644 * @data_in_dma: physical address for READS
1645 * @data_in_sz: data xfer size for READS
1646 *
1647 * Return nothing.
1648 */
1649static void
1650_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
1651 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1652 size_t data_in_sz)
1653{
1654 u8 sgl_flags;
1655
1656 if (!data_out_sz && !data_in_sz) {
1657 _base_build_zero_len_sge_ieee(ioc, psge);
1658 return;
1659 }
1660
1661 if (data_out_sz && data_in_sz) {
1662 /* WRITE sgel first */
1663 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1664 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1665 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1666 data_out_dma);
1667
1668 /* incr sgel */
1669 psge += ioc->sge_size_ieee;
1670
1671 /* READ sgel last */
1672 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1673 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1674 data_in_dma);
1675 } else if (data_out_sz) /* WRITE */ {
1676 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1677 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1678 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1679 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1680 data_out_dma);
1681 } else if (data_in_sz) /* READ */ {
1682 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1683 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1684 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1685 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1686 data_in_dma);
1687 }
1688}
1689
1690#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1691
1692/**
1693 * _base_config_dma_addressing - set dma addressing
1694 * @ioc: per adapter object
1695 * @pdev: PCI device struct
1696 *
1697 * Returns 0 for success, non-zero for failure.
1698 */
1699static int
1700_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
1701{
1702 struct sysinfo s;
1703 u64 consistent_dma_mask;
1704
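	/* pick the coherent DMA mask up front: 64-bit once ioc->dma_mask is
	 * already non-zero, 32-bit otherwise
	 */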
1705 if (ioc->dma_mask)
1706 consistent_dma_mask = DMA_BIT_MASK(64);
1707 else
1708 consistent_dma_mask = DMA_BIT_MASK(32);
1709
1710 if (sizeof(dma_addr_t) > 4) {
1711 const uint64_t required_mask =
1712 dma_get_required_mask(&pdev->dev);
1713 if ((required_mask > DMA_BIT_MASK(32)) &&
1714 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301715 !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301716 ioc->base_add_sg_single = &_base_add_sg_single_64;
1717 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301718 ioc->dma_mask = 64;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301719 goto out;
1720 }
1721 }
1722
1723 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1724 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1725 ioc->base_add_sg_single = &_base_add_sg_single_32;
1726 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301727 ioc->dma_mask = 32;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301728 } else
1729 return -ENODEV;
1730
1731 out:
1732 si_meminfo(&s);
1733 pr_info(MPT3SAS_FMT
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301734 "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
1735 ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301736
1737 return 0;
1738}
1739
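/**
 * _base_change_consistent_dma_mask - set the coherent DMA mask to 64 bit,
 * falling back to 32 bit if the 64 bit mask cannot be set
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Returns 0 for success, non-zero for failure.
 */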
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301740static int
1741_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
1742 struct pci_dev *pdev)
1743{
1744 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1745 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1746 return -ENODEV;
1747 }
1748 return 0;
1749}
1750
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301751/**
 1752 * _base_check_enable_msix - checks MSIX capability.
1753 * @ioc: per adapter object
1754 *
1755 * Check to see if card is capable of MSIX, and set number
1756 * of available msix vectors
1757 */
1758static int
1759_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1760{
1761 int base;
1762 u16 message_control;
1763
Sreekanth Reddy42081172015-11-11 17:30:26 +05301764	/* Check whether this is a SAS2008 B0 controller;
 1765	 * if so, use IO-APIC instead of MSI-X.
 1766	 */
1767 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
1768 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
1769 return -EINVAL;
1770 }
1771
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301772 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1773 if (!base) {
1774 dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
1775 ioc->name));
1776 return -EINVAL;
1777 }
1778
1779 /* get msix vector count */
Sreekanth Reddy42081172015-11-11 17:30:26 +05301780 /* NUMA_IO not supported for older controllers */
1781 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1782 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1783 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1784 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1785 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1786 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1787 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1788 ioc->msix_vector_count = 1;
1789 else {
1790 pci_read_config_word(ioc->pdev, base + 2, &message_control);
1791 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1792 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301793 dinitprintk(ioc, pr_info(MPT3SAS_FMT
1794 "msix is supported, vector_count(%d)\n",
1795 ioc->name, ioc->msix_vector_count));
1796 return 0;
1797}
1798
1799/**
1800 * _base_free_irq - free irq
1801 * @ioc: per adapter object
1802 *
 1803 * Frees the IRQ and the respective reply_queue from the list.
1804 */
1805static void
1806_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
1807{
1808 struct adapter_reply_queue *reply_q, *next;
1809
1810 if (list_empty(&ioc->reply_queue_list))
1811 return;
1812
1813 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1814 list_del(&reply_q->list);
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301815 irq_set_affinity_hint(reply_q->vector, NULL);
1816 free_cpumask_var(reply_q->affinity_hint);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301817 synchronize_irq(reply_q->vector);
1818 free_irq(reply_q->vector, reply_q);
1819 kfree(reply_q);
1820 }
1821}
1822
1823/**
1824 * _base_request_irq - request irq
1825 * @ioc: per adapter object
1826 * @index: msix index into vector table
1827 * @vector: irq vector
1828 *
 1829 * Inserts the respective reply_queue into the list.
1830 */
1831static int
1832_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
1833{
1834 struct adapter_reply_queue *reply_q;
1835 int r;
1836
1837 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1838 if (!reply_q) {
1839 pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
1840 ioc->name, (int)sizeof(struct adapter_reply_queue));
1841 return -ENOMEM;
1842 }
1843 reply_q->ioc = ioc;
1844 reply_q->msix_index = index;
1845 reply_q->vector = vector;
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301846
 1847	if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
		kfree(reply_q);
 1848		return -ENOMEM;
	}
1849 cpumask_clear(reply_q->affinity_hint);
1850
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301851 atomic_set(&reply_q->busy, 0);
1852 if (ioc->msix_enable)
1853 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
Sreekanth Reddyc84b06a2015-11-11 17:30:35 +05301854 ioc->driver_name, ioc->id, index);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301855 else
1856 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
Sreekanth Reddyc84b06a2015-11-11 17:30:35 +05301857 ioc->driver_name, ioc->id);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301858 r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1859 reply_q);
1860 if (r) {
1861 pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
1862 reply_q->name, vector);
1863 kfree(reply_q);
1864 return -EBUSY;
1865 }
1866
1867 INIT_LIST_HEAD(&reply_q->list);
1868 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1869 return 0;
1870}
1871
1872/**
1873 * _base_assign_reply_queues - assigning msix index for each cpu
1874 * @ioc: per adapter object
1875 *
 1876 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
 1877 *
 1878 * It would be nice if we could call irq_set_affinity, however it is not
 1879 * an exported symbol
1880 */
1881static void
1882_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1883{
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001884 unsigned int cpu, nr_cpus, nr_msix, index = 0;
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301885 struct adapter_reply_queue *reply_q;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301886
1887 if (!_base_is_controller_msix_enabled(ioc))
1888 return;
1889
1890 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1891
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001892 nr_cpus = num_online_cpus();
1893 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
1894 ioc->facts.MaxMSIxVectors);
1895 if (!nr_msix)
1896 return;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301897
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001898 cpu = cpumask_first(cpu_online_mask);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301899
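	/* Hand each reply queue a contiguous block of roughly
	 * nr_cpus/nr_msix online CPUs; the first (nr_cpus % nr_msix) queues
	 * take one extra CPU, so every online CPU ends up with an entry in
	 * cpu_msix_table.
	 */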
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301900 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1901
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001902 unsigned int i, group = nr_cpus / nr_msix;
1903
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301904 if (cpu >= nr_cpus)
1905 break;
1906
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001907 if (index < nr_cpus % nr_msix)
1908 group++;
1909
1910 for (i = 0 ; i < group ; i++) {
1911 ioc->cpu_msix_table[cpu] = index;
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301912 cpumask_or(reply_q->affinity_hint,
1913 reply_q->affinity_hint, get_cpu_mask(cpu));
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001914 cpu = cpumask_next(cpu, cpu_online_mask);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301915 }
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001916
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301917 if (irq_set_affinity_hint(reply_q->vector,
1918 reply_q->affinity_hint))
1919 dinitprintk(ioc, pr_info(MPT3SAS_FMT
1920 "error setting affinity hint for irq vector %d\n",
1921 ioc->name, reply_q->vector));
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001922 index++;
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301923 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301924}
1925
1926/**
1927 * _base_disable_msix - disables msix
1928 * @ioc: per adapter object
1929 *
1930 */
1931static void
1932_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
1933{
1934 if (!ioc->msix_enable)
1935 return;
1936 pci_disable_msix(ioc->pdev);
1937 ioc->msix_enable = 0;
1938}
1939
1940/**
 1941 * _base_enable_msix - enables msix, falling back to io_apic
1942 * @ioc: per adapter object
1943 *
1944 */
1945static int
1946_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1947{
1948 struct msix_entry *entries, *a;
1949 int r;
1950 int i;
1951 u8 try_msix = 0;
1952
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301953 if (msix_disable == -1 || msix_disable == 0)
1954 try_msix = 1;
1955
1956 if (!try_msix)
1957 goto try_ioapic;
1958
1959 if (_base_check_enable_msix(ioc) != 0)
1960 goto try_ioapic;
1961
1962 ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1963 ioc->msix_vector_count);
1964
Sreekanth Reddy9c500062013-08-14 18:23:20 +05301965 printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
1966 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
1967 ioc->cpu_count, max_msix_vectors);
1968
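	/* Without the reply descriptor post queue array, all reply post
	 * queues are carved out of one contiguous DMA pool (see
	 * _base_allocate_memory_pools), so the default vector count is capped
	 * at 8 to bound the size of that single allocation.
	 */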
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301969 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1970 max_msix_vectors = 8;
1971
Sreekanth Reddy9c500062013-08-14 18:23:20 +05301972 if (max_msix_vectors > 0) {
1973 ioc->reply_queue_count = min_t(int, max_msix_vectors,
1974 ioc->reply_queue_count);
1975 ioc->msix_vector_count = ioc->reply_queue_count;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301976 } else if (max_msix_vectors == 0)
1977 goto try_ioapic;
Sreekanth Reddy9c500062013-08-14 18:23:20 +05301978
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301979 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1980 GFP_KERNEL);
1981 if (!entries) {
1982 dfailprintk(ioc, pr_info(MPT3SAS_FMT
1983 "kcalloc failed @ at %s:%d/%s() !!!\n",
1984 ioc->name, __FILE__, __LINE__, __func__));
1985 goto try_ioapic;
1986 }
1987
1988 for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1989 a->entry = i;
1990
Alexander Gordeev6bfa6902014-08-18 08:01:46 +02001991 r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301992 if (r) {
1993 dfailprintk(ioc, pr_info(MPT3SAS_FMT
Alexander Gordeev6bfa6902014-08-18 08:01:46 +02001994 "pci_enable_msix_exact failed (r=%d) !!!\n",
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301995 ioc->name, r));
1996 kfree(entries);
1997 goto try_ioapic;
1998 }
1999
2000 ioc->msix_enable = 1;
2001 for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
2002 r = _base_request_irq(ioc, i, a->vector);
2003 if (r) {
2004 _base_free_irq(ioc);
2005 _base_disable_msix(ioc);
2006 kfree(entries);
2007 goto try_ioapic;
2008 }
2009 }
2010
2011 kfree(entries);
2012 return 0;
2013
 2014/* fall back to io_apic interrupt routing */
2015 try_ioapic:
2016
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05302017 ioc->reply_queue_count = 1;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302018 r = _base_request_irq(ioc, 0, ioc->pdev->irq);
2019
2020 return r;
2021}
2022
2023/**
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05302024 * mpt3sas_base_unmap_resources - free controller resources
2025 * @ioc: per adapter object
2026 */
2027void
2028mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2029{
2030 struct pci_dev *pdev = ioc->pdev;
2031
2032 dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
2033 ioc->name, __func__));
2034
2035 _base_free_irq(ioc);
2036 _base_disable_msix(ioc);
2037
Tomas Henzl5f985d82015-12-23 14:21:47 +01002038 if (ioc->msix96_vector) {
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05302039 kfree(ioc->replyPostRegisterIndex);
Tomas Henzl5f985d82015-12-23 14:21:47 +01002040 ioc->replyPostRegisterIndex = NULL;
2041 }
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05302042
2043 if (ioc->chip_phys) {
2044 iounmap(ioc->chip);
2045 ioc->chip_phys = 0;
2046 }
2047
2048 if (pci_is_enabled(pdev)) {
2049 pci_release_selected_regions(ioc->pdev, ioc->bars);
2050 pci_disable_pcie_error_reporting(pdev);
2051 pci_disable_device(pdev);
2052 }
2053}
2054
2055/**
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302056 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
2057 * @ioc: per adapter object
2058 *
2059 * Returns 0 for success, non-zero for failure.
2060 */
2061int
2062mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2063{
2064 struct pci_dev *pdev = ioc->pdev;
2065 u32 memap_sz;
2066 u32 pio_sz;
2067 int i, r = 0;
2068 u64 pio_chip = 0;
2069 u64 chip_phys = 0;
2070 struct adapter_reply_queue *reply_q;
2071
2072 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
2073 ioc->name, __func__));
2074
2075 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2076 if (pci_enable_device_mem(pdev)) {
2077 pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
2078 ioc->name);
Joe Lawrencecf9bd21a2013-08-08 16:45:39 -04002079 ioc->bars = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302080 return -ENODEV;
2081 }
2082
2083
2084 if (pci_request_selected_regions(pdev, ioc->bars,
Sreekanth Reddyc84b06a2015-11-11 17:30:35 +05302085 ioc->driver_name)) {
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302086 pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
2087 ioc->name);
Joe Lawrencecf9bd21a2013-08-08 16:45:39 -04002088 ioc->bars = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302089 r = -ENODEV;
2090 goto out_fail;
2091 }
2092
2093/* AER (Advanced Error Reporting) hooks */
2094 pci_enable_pcie_error_reporting(pdev);
2095
2096 pci_set_master(pdev);
2097
2098
2099 if (_base_config_dma_addressing(ioc, pdev) != 0) {
2100 pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
2101 ioc->name, pci_name(pdev));
2102 r = -ENODEV;
2103 goto out_fail;
2104 }
2105
Sreekanth Reddy5aeeb782015-07-15 10:19:56 +05302106 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
2107 (!memap_sz || !pio_sz); i++) {
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302108 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2109 if (pio_sz)
2110 continue;
2111 pio_chip = (u64)pci_resource_start(pdev, i);
2112 pio_sz = pci_resource_len(pdev, i);
2113 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
2114 if (memap_sz)
2115 continue;
2116 ioc->chip_phys = pci_resource_start(pdev, i);
2117 chip_phys = (u64)ioc->chip_phys;
2118 memap_sz = pci_resource_len(pdev, i);
2119 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302120 }
2121 }
2122
Sreekanth Reddy5aeeb782015-07-15 10:19:56 +05302123 if (ioc->chip == NULL) {
 2124		pr_err(MPT3SAS_FMT "unable to map adapter memory "
 2125		    "or resource not found\n", ioc->name);
2126 r = -EINVAL;
2127 goto out_fail;
2128 }
2129
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302130 _base_mask_interrupts(ioc);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05302131
2132 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
2133 if (r)
2134 goto out_fail;
2135
2136 if (!ioc->rdpq_array_enable_assigned) {
2137 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
2138 ioc->rdpq_array_enable_assigned = 1;
2139 }
2140
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302141 r = _base_enable_msix(ioc);
2142 if (r)
2143 goto out_fail;
2144
Sreekanth Reddyfb77bb52015-06-30 12:24:47 +05302145 /* Use the Combined reply queue feature only for SAS3 C0 & higher
2146 * revision HBAs and also only when reply queue count is greater than 8
2147 */
2148 if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
 2149		/* Determine the Supplemental Reply Post Host Index Register
 2150		 * addresses. The Supplemental Reply Post Host Index Registers
 2151		 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
 2152		 * each subsequent register is offset by
 2153		 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes from the previous one.
2154 */
2155 ioc->replyPostRegisterIndex = kcalloc(
2156 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
2157 sizeof(resource_size_t *), GFP_KERNEL);
2158 if (!ioc->replyPostRegisterIndex) {
2159 dfailprintk(ioc, printk(MPT3SAS_FMT
2160 "allocation for reply Post Register Index failed!!!\n",
2161 ioc->name));
2162 r = -ENOMEM;
2163 goto out_fail;
2164 }
2165
2166 for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
2167 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
2168 ((u8 *)&ioc->chip->Doorbell +
2169 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2170 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
2171 }
2172 } else
2173 ioc->msix96_vector = 0;
2174
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302175 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
2176 pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
2177 reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
2178 "IO-APIC enabled"), reply_q->vector);
2179
2180 pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
2181 ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
2182 pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
2183 ioc->name, (unsigned long long)pio_chip, pio_sz);
2184
2185 /* Save PCI configuration state for recovery from PCI AER/EEH errors */
2186 pci_save_state(pdev);
2187 return 0;
2188
2189 out_fail:
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05302190 mpt3sas_base_unmap_resources(ioc);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302191 return r;
2192}
2193
2194/**
2195 * mpt3sas_base_get_msg_frame - obtain request mf pointer
2196 * @ioc: per adapter object
2197 * @smid: system request message index(smid zero is invalid)
2198 *
2199 * Returns virt pointer to message frame.
2200 */
2201void *
2202mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2203{
2204 return (void *)(ioc->request + (smid * ioc->request_sz));
2205}
2206
2207/**
2208 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
2209 * @ioc: per adapter object
2210 * @smid: system request message index
2211 *
2212 * Returns virt pointer to sense buffer.
2213 */
2214void *
2215mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2216{
2217 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
2218}
2219
2220/**
2221 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
2222 * @ioc: per adapter object
2223 * @smid: system request message index
2224 *
2225 * Returns phys pointer to the low 32bit address of the sense buffer.
2226 */
2227__le32
2228mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2229{
2230 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
2231 SCSI_SENSE_BUFFERSIZE));
2232}
2233
2234/**
2235 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
2236 * @ioc: per adapter object
2237 * @phys_addr: lower 32 physical addr of the reply
2238 *
2239 * Converts 32bit lower physical addr into a virt address.
2240 */
2241void *
2242mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
2243{
2244 if (!phys_addr)
2245 return NULL;
2246 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
2247}
2248
2249/**
2250 * mpt3sas_base_get_smid - obtain a free smid from internal queue
2251 * @ioc: per adapter object
2252 * @cb_idx: callback index
2253 *
2254 * Returns smid (zero is invalid)
2255 */
2256u16
2257mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2258{
2259 unsigned long flags;
2260 struct request_tracker *request;
2261 u16 smid;
2262
2263 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2264 if (list_empty(&ioc->internal_free_list)) {
2265 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2266 pr_err(MPT3SAS_FMT "%s: smid not available\n",
2267 ioc->name, __func__);
2268 return 0;
2269 }
2270
2271 request = list_entry(ioc->internal_free_list.next,
2272 struct request_tracker, tracker_list);
2273 request->cb_idx = cb_idx;
2274 smid = request->smid;
2275 list_del(&request->tracker_list);
2276 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2277 return smid;
2278}
2279
2280/**
2281 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
2282 * @ioc: per adapter object
2283 * @cb_idx: callback index
2284 * @scmd: pointer to scsi command object
2285 *
2286 * Returns smid (zero is invalid)
2287 */
2288u16
2289mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
2290 struct scsi_cmnd *scmd)
2291{
2292 unsigned long flags;
2293 struct scsiio_tracker *request;
2294 u16 smid;
2295
2296 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2297 if (list_empty(&ioc->free_list)) {
2298 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2299 pr_err(MPT3SAS_FMT "%s: smid not available\n",
2300 ioc->name, __func__);
2301 return 0;
2302 }
2303
2304 request = list_entry(ioc->free_list.next,
2305 struct scsiio_tracker, tracker_list);
2306 request->scmd = scmd;
2307 request->cb_idx = cb_idx;
2308 smid = request->smid;
2309 list_del(&request->tracker_list);
2310 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2311 return smid;
2312}
2313
2314/**
2315 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
2316 * @ioc: per adapter object
2317 * @cb_idx: callback index
2318 *
2319 * Returns smid (zero is invalid)
2320 */
2321u16
2322mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2323{
2324 unsigned long flags;
2325 struct request_tracker *request;
2326 u16 smid;
2327
2328 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2329 if (list_empty(&ioc->hpr_free_list)) {
2330 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2331 return 0;
2332 }
2333
2334 request = list_entry(ioc->hpr_free_list.next,
2335 struct request_tracker, tracker_list);
2336 request->cb_idx = cb_idx;
2337 smid = request->smid;
2338 list_del(&request->tracker_list);
2339 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2340 return smid;
2341}
2342
2343/**
2344 * mpt3sas_base_free_smid - put smid back on free_list
2345 * @ioc: per adapter object
2346 * @smid: system request message index
2347 *
2348 * Return nothing.
2349 */
2350void
2351mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2352{
2353 unsigned long flags;
2354 int i;
2355 struct chain_tracker *chain_req, *next;
2356
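	/* smid ranges map onto the three tracker pools: smids below
	 * hi_priority_smid belong to the scsiio queue, smids below
	 * internal_smid to the hi-priority queue, and the remainder up to
	 * hba_queue_depth to the internal queue.
	 */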
2357 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2358 if (smid < ioc->hi_priority_smid) {
2359 /* scsiio queue */
2360 i = smid - 1;
2361 if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
2362 list_for_each_entry_safe(chain_req, next,
2363 &ioc->scsi_lookup[i].chain_list, tracker_list) {
2364 list_del_init(&chain_req->tracker_list);
2365 list_add(&chain_req->tracker_list,
2366 &ioc->free_chain_list);
2367 }
2368 }
2369 ioc->scsi_lookup[i].cb_idx = 0xFF;
2370 ioc->scsi_lookup[i].scmd = NULL;
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05302371 ioc->scsi_lookup[i].direct_io = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302372 list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
2373 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2374
2375 /*
2376 * See _wait_for_commands_to_complete() call with regards
2377 * to this code.
2378 */
2379 if (ioc->shost_recovery && ioc->pending_io_count) {
2380 if (ioc->pending_io_count == 1)
2381 wake_up(&ioc->reset_wq);
2382 ioc->pending_io_count--;
2383 }
2384 return;
2385 } else if (smid < ioc->internal_smid) {
2386 /* hi-priority */
2387 i = smid - ioc->hi_priority_smid;
2388 ioc->hpr_lookup[i].cb_idx = 0xFF;
2389 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
2390 } else if (smid <= ioc->hba_queue_depth) {
2391 /* internal queue */
2392 i = smid - ioc->internal_smid;
2393 ioc->internal_lookup[i].cb_idx = 0xFF;
2394 list_add(&ioc->internal_lookup[i].tracker_list,
2395 &ioc->internal_free_list);
2396 }
2397 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2398}
2399
2400/**
2401 * _base_writeq - 64 bit write to MMIO
2402 * @ioc: per adapter object
2403 * @b: data payload
2404 * @addr: address in MMIO space
2405 * @writeq_lock: spin lock
2406 *
2407 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
 2408 * care of 32 bit environments where it's not guaranteed that the entire word
 2409 * is sent in one transfer.
2410 */
2411#if defined(writeq) && defined(CONFIG_64BIT)
2412static inline void
2413_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2414{
2415 writeq(cpu_to_le64(b), addr);
2416}
2417#else
2418static inline void
2419_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2420{
2421 unsigned long flags;
2422 __u64 data_out = cpu_to_le64(b);
2423
2424 spin_lock_irqsave(writeq_lock, flags);
2425 writel((u32)(data_out), addr);
2426 writel((u32)(data_out >> 32), (addr + 4));
2427 spin_unlock_irqrestore(writeq_lock, flags);
2428}
2429#endif
2430
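/**
 * _base_get_msix_index - get the msix index for the submitting cpu
 * @ioc: per adapter object
 *
 * Returns the reply queue (msix) index recorded in cpu_msix_table for the
 * cpu on which the caller is currently running.
 */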
2431static inline u8
2432_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
2433{
2434 return ioc->cpu_msix_table[raw_smp_processor_id()];
2435}
2436
2437/**
2438 * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
2439 * @ioc: per adapter object
2440 * @smid: system request message index
2441 * @handle: device handle
2442 *
2443 * Return nothing.
2444 */
2445void
2446mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2447{
2448 Mpi2RequestDescriptorUnion_t descriptor;
2449 u64 *request = (u64 *)&descriptor;
2450
2451
2452 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2453 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2454 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2455 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2456 descriptor.SCSIIO.LMID = 0;
2457 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2458 &ioc->scsi_lookup_lock);
2459}
2460
2461/**
2462 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
2463 * @ioc: per adapter object
2464 * @smid: system request message index
2465 * @handle: device handle
2466 *
2467 * Return nothing.
2468 */
2469void
2470mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2471 u16 handle)
2472{
2473 Mpi2RequestDescriptorUnion_t descriptor;
2474 u64 *request = (u64 *)&descriptor;
2475
2476 descriptor.SCSIIO.RequestFlags =
2477 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2478 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2479 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2480 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2481 descriptor.SCSIIO.LMID = 0;
2482 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2483 &ioc->scsi_lookup_lock);
2484}
2485
2486/**
 2487 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
2488 * @ioc: per adapter object
2489 * @smid: system request message index
2490 *
2491 * Return nothing.
2492 */
2493void
2494mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2495{
2496 Mpi2RequestDescriptorUnion_t descriptor;
2497 u64 *request = (u64 *)&descriptor;
2498
2499 descriptor.HighPriority.RequestFlags =
2500 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2501 descriptor.HighPriority.MSIxIndex = 0;
2502 descriptor.HighPriority.SMID = cpu_to_le16(smid);
2503 descriptor.HighPriority.LMID = 0;
2504 descriptor.HighPriority.Reserved1 = 0;
2505 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2506 &ioc->scsi_lookup_lock);
2507}
2508
2509/**
2510 * mpt3sas_base_put_smid_default - Default, primarily used for config pages
2511 * @ioc: per adapter object
2512 * @smid: system request message index
2513 *
2514 * Return nothing.
2515 */
2516void
2517mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2518{
2519 Mpi2RequestDescriptorUnion_t descriptor;
2520 u64 *request = (u64 *)&descriptor;
2521
2522 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2523 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
2524 descriptor.Default.SMID = cpu_to_le16(smid);
2525 descriptor.Default.LMID = 0;
2526 descriptor.Default.DescriptorTypeDependent = 0;
2527 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2528 &ioc->scsi_lookup_lock);
2529}
2530
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302531/**
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302532 * _base_display_OEMs_branding - Display branding string
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302533 * @ioc: per adapter object
2534 *
2535 * Return nothing.
2536 */
2537static void
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302538_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302539{
2542
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302543 switch (ioc->pdev->subsystem_vendor) {
2544 case PCI_VENDOR_ID_INTEL:
2545 switch (ioc->pdev->device) {
2546 case MPI2_MFGPAGE_DEVID_SAS2008:
2547 switch (ioc->pdev->subsystem_device) {
2548 case MPT2SAS_INTEL_RMS2LL080_SSDID:
2549 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2550 MPT2SAS_INTEL_RMS2LL080_BRANDING);
2551 break;
2552 case MPT2SAS_INTEL_RMS2LL040_SSDID:
2553 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2554 MPT2SAS_INTEL_RMS2LL040_BRANDING);
2555 break;
2556 case MPT2SAS_INTEL_SSD910_SSDID:
2557 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2558 MPT2SAS_INTEL_SSD910_BRANDING);
2559 break;
2560 default:
2561 pr_info(MPT3SAS_FMT
2562 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2563 ioc->name, ioc->pdev->subsystem_device);
2564 break;
 2565			}
			break;
2566 case MPI2_MFGPAGE_DEVID_SAS2308_2:
2567 switch (ioc->pdev->subsystem_device) {
2568 case MPT2SAS_INTEL_RS25GB008_SSDID:
2569 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2570 MPT2SAS_INTEL_RS25GB008_BRANDING);
2571 break;
2572 case MPT2SAS_INTEL_RMS25JB080_SSDID:
2573 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2574 MPT2SAS_INTEL_RMS25JB080_BRANDING);
2575 break;
2576 case MPT2SAS_INTEL_RMS25JB040_SSDID:
2577 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2578 MPT2SAS_INTEL_RMS25JB040_BRANDING);
2579 break;
2580 case MPT2SAS_INTEL_RMS25KB080_SSDID:
2581 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2582 MPT2SAS_INTEL_RMS25KB080_BRANDING);
2583 break;
2584 case MPT2SAS_INTEL_RMS25KB040_SSDID:
2585 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2586 MPT2SAS_INTEL_RMS25KB040_BRANDING);
2587 break;
2588 case MPT2SAS_INTEL_RMS25LB040_SSDID:
2589 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2590 MPT2SAS_INTEL_RMS25LB040_BRANDING);
2591 break;
2592 case MPT2SAS_INTEL_RMS25LB080_SSDID:
2593 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2594 MPT2SAS_INTEL_RMS25LB080_BRANDING);
2595 break;
2596 default:
2597 pr_info(MPT3SAS_FMT
2598 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2599 ioc->name, ioc->pdev->subsystem_device);
2600 break;
 2601			}
			break;
2602 case MPI25_MFGPAGE_DEVID_SAS3008:
2603 switch (ioc->pdev->subsystem_device) {
2604 case MPT3SAS_INTEL_RMS3JC080_SSDID:
2605 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2606 MPT3SAS_INTEL_RMS3JC080_BRANDING);
2607 break;
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302608
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302609 case MPT3SAS_INTEL_RS3GC008_SSDID:
2610 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2611 MPT3SAS_INTEL_RS3GC008_BRANDING);
2612 break;
2613 case MPT3SAS_INTEL_RS3FC044_SSDID:
2614 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2615 MPT3SAS_INTEL_RS3FC044_BRANDING);
2616 break;
2617 case MPT3SAS_INTEL_RS3UC080_SSDID:
2618 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2619 MPT3SAS_INTEL_RS3UC080_BRANDING);
2620 break;
2621 default:
2622 pr_info(MPT3SAS_FMT
2623 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2624 ioc->name, ioc->pdev->subsystem_device);
2625 break;
2626 }
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302627 break;
2628 default:
2629 pr_info(MPT3SAS_FMT
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302630 "Intel(R) Controller: Subsystem ID: 0x%X\n",
Sreekanth Reddyd8eb4a42015-06-30 12:25:02 +05302631 ioc->name, ioc->pdev->subsystem_device);
2632 break;
2633 }
2634 break;
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302635 case PCI_VENDOR_ID_DELL:
2636 switch (ioc->pdev->device) {
2637 case MPI2_MFGPAGE_DEVID_SAS2008:
2638 switch (ioc->pdev->subsystem_device) {
2639 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
2640 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2641 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
2642 break;
2643 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
2644 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2645 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
2646 break;
2647 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
2648 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2649 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
2650 break;
2651 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
2652 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2653 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
2654 break;
2655 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
2656 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2657 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
2658 break;
2659 case MPT2SAS_DELL_PERC_H200_SSDID:
2660 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2661 MPT2SAS_DELL_PERC_H200_BRANDING);
2662 break;
2663 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
2664 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2665 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
2666 break;
2667 default:
2668 pr_info(MPT3SAS_FMT
2669 "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
2670 ioc->name, ioc->pdev->subsystem_device);
2671 break;
2672 }
2673 break;
2674 case MPI25_MFGPAGE_DEVID_SAS3008:
2675 switch (ioc->pdev->subsystem_device) {
2676 case MPT3SAS_DELL_12G_HBA_SSDID:
2677 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2678 MPT3SAS_DELL_12G_HBA_BRANDING);
2679 break;
2680 default:
2681 pr_info(MPT3SAS_FMT
2682 "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
2683 ioc->name, ioc->pdev->subsystem_device);
2684 break;
2685 }
2686 break;
2687 default:
2688 pr_info(MPT3SAS_FMT
2689 "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
2690 ioc->pdev->subsystem_device);
2691 break;
2692 }
2693 break;
2694 case PCI_VENDOR_ID_CISCO:
2695 switch (ioc->pdev->device) {
2696 case MPI25_MFGPAGE_DEVID_SAS3008:
2697 switch (ioc->pdev->subsystem_device) {
2698 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
2699 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2700 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
2701 break;
2702 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
2703 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2704 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
2705 break;
2706 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2707 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2708 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2709 break;
2710 default:
2711 pr_info(MPT3SAS_FMT
2712 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2713 ioc->name, ioc->pdev->subsystem_device);
2714 break;
2715 }
2716 break;
2717 case MPI25_MFGPAGE_DEVID_SAS3108_1:
2718 switch (ioc->pdev->subsystem_device) {
2719 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2720 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2721 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2722 break;
2723 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
2724 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2725 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
2726 );
2727 break;
2728 default:
2729 pr_info(MPT3SAS_FMT
2730 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2731 ioc->name, ioc->pdev->subsystem_device);
2732 break;
2733 }
2734 break;
2735 default:
2736 pr_info(MPT3SAS_FMT
2737 "Cisco SAS HBA: Subsystem ID: 0x%X\n",
2738 ioc->name, ioc->pdev->subsystem_device);
2739 break;
2740 }
2741 break;
2742 case MPT2SAS_HP_3PAR_SSVID:
2743 switch (ioc->pdev->device) {
2744 case MPI2_MFGPAGE_DEVID_SAS2004:
2745 switch (ioc->pdev->subsystem_device) {
2746 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2747 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2748 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2749 break;
2750 default:
2751 pr_info(MPT3SAS_FMT
2752 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2753 ioc->name, ioc->pdev->subsystem_device);
2754 break;
 2755			}
			break;
2756 case MPI2_MFGPAGE_DEVID_SAS2308_2:
2757 switch (ioc->pdev->subsystem_device) {
2758 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2759 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2760 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2761 break;
2762 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2763 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2764 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2765 break;
2766 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2767 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2768 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2769 break;
2770 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2771 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2772 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2773 break;
2774 default:
2775 pr_info(MPT3SAS_FMT
2776 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2777 ioc->name, ioc->pdev->subsystem_device);
2778 break;
 2779			}
			break;
2780 default:
2781 pr_info(MPT3SAS_FMT
2782 "HP SAS HBA: Subsystem ID: 0x%X\n",
2783 ioc->name, ioc->pdev->subsystem_device);
2784 break;
2785 }
Sreekanth Reddy38e41412015-06-30 12:24:57 +05302786 default:
Sreekanth Reddy38e41412015-06-30 12:24:57 +05302787 break;
2788 }
2789}
Sreekanth Reddyfb84dfc2015-06-30 12:24:56 +05302790
2791/**
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302792 * _base_display_ioc_capabilities - Disply IOC's capabilities.
2793 * @ioc: per adapter object
2794 *
2795 * Return nothing.
2796 */
2797static void
2798_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2799{
2800 int i = 0;
2801 char desc[16];
2802 u32 iounit_pg1_flags;
2803 u32 bios_version;
2804
2805 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2806 strncpy(desc, ioc->manu_pg0.ChipName, 16);
2807 pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
2808 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2809 ioc->name, desc,
2810 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2811 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2812 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2813 ioc->facts.FWVersion.Word & 0x000000FF,
2814 ioc->pdev->revision,
2815 (bios_version & 0xFF000000) >> 24,
2816 (bios_version & 0x00FF0000) >> 16,
2817 (bios_version & 0x0000FF00) >> 8,
2818 bios_version & 0x000000FF);
2819
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302820 _base_display_OEMs_branding(ioc);
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302821
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302822 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2823
2824 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2825 pr_info("Initiator");
2826 i++;
2827 }
2828
2829 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2830 pr_info("%sTarget", i ? "," : "");
2831 i++;
2832 }
2833
2834 i = 0;
2835 pr_info("), ");
2836 pr_info("Capabilities=(");
2837
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05302838 if (!ioc->hide_ir_msg) {
2839 if (ioc->facts.IOCCapabilities &
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302840 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2841 pr_info("Raid");
2842 i++;
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05302843 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302844 }
2845
2846 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2847 pr_info("%sTLR", i ? "," : "");
2848 i++;
2849 }
2850
2851 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2852 pr_info("%sMulticast", i ? "," : "");
2853 i++;
2854 }
2855
2856 if (ioc->facts.IOCCapabilities &
2857 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2858 pr_info("%sBIDI Target", i ? "," : "");
2859 i++;
2860 }
2861
2862 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2863 pr_info("%sEEDP", i ? "," : "");
2864 i++;
2865 }
2866
2867 if (ioc->facts.IOCCapabilities &
2868 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2869 pr_info("%sSnapshot Buffer", i ? "," : "");
2870 i++;
2871 }
2872
2873 if (ioc->facts.IOCCapabilities &
2874 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2875 pr_info("%sDiag Trace Buffer", i ? "," : "");
2876 i++;
2877 }
2878
2879 if (ioc->facts.IOCCapabilities &
2880 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2881 pr_info("%sDiag Extended Buffer", i ? "," : "");
2882 i++;
2883 }
2884
2885 if (ioc->facts.IOCCapabilities &
2886 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2887 pr_info("%sTask Set Full", i ? "," : "");
2888 i++;
2889 }
2890
2891 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2892 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2893 pr_info("%sNCQ", i ? "," : "");
2894 i++;
2895 }
2896
2897 pr_info(")\n");
2898}
2899
2900/**
2901 * mpt3sas_base_update_missing_delay - change the missing delay timers
2902 * @ioc: per adapter object
2903 * @device_missing_delay: amount of time till device is reported missing
2904 * @io_missing_delay: interval IO is returned when there is a missing device
2905 *
2906 * Return nothing.
2907 *
2908 * Passed on the command line, this function will modify the device missing
2909 * delay, as well as the io missing delay. This should be called at driver
2910 * load time.
2911 */
2912void
2913mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
2914 u16 device_missing_delay, u8 io_missing_delay)
2915{
2916 u16 dmd, dmd_new, dmd_orignal;
2917 u8 io_missing_delay_original;
2918 u16 sz;
2919 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2920 Mpi2ConfigReply_t mpi_reply;
2921 u8 num_phys = 0;
2922 u16 ioc_status;
2923
2924 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
2925 if (!num_phys)
2926 return;
2927
2928 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2929 sizeof(Mpi2SasIOUnit1PhyData_t));
2930 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2931 if (!sas_iounit_pg1) {
2932 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2933 ioc->name, __FILE__, __LINE__, __func__);
2934 goto out;
2935 }
2936 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2937 sas_iounit_pg1, sz))) {
2938 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2939 ioc->name, __FILE__, __LINE__, __func__);
2940 goto out;
2941 }
2942 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2943 MPI2_IOCSTATUS_MASK;
2944 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2945 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2946 ioc->name, __FILE__, __LINE__, __func__);
2947 goto out;
2948 }
2949
2950 /* device missing delay */
2951 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2952 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2953 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2954 else
2955 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2956 dmd_orignal = dmd;
2957 if (device_missing_delay > 0x7F) {
2958 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2959 device_missing_delay;
2960 dmd = dmd / 16;
2961 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2962 } else
2963 dmd = device_missing_delay;
2964 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2965
2966 /* io missing delay */
2967 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2968 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2969
2970 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2971 sz)) {
2972 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2973 dmd_new = (dmd &
2974 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2975 else
2976 dmd_new =
2977 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2978 pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
2979 ioc->name, dmd_orignal, dmd_new);
2980 pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
2981 ioc->name, io_missing_delay_original,
2982 io_missing_delay);
2983 ioc->device_missing_delay = dmd_new;
2984 ioc->io_missing_delay = io_missing_delay;
2985 }
2986
2987out:
2988 kfree(sas_iounit_pg1);
2989}
2990/**
2991 * _base_static_config_pages - static start of day config pages
2992 * @ioc: per adapter object
2993 *
2994 * Return nothing.
2995 */
2996static void
2997_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
2998{
2999 Mpi2ConfigReply_t mpi_reply;
3000 u32 iounit_pg1_flags;
3001
3002 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
3003 if (ioc->ir_firmware)
3004 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
3005 &ioc->manu_pg10);
3006
3007 /*
3008 * Ensure correct T10 PI operation if vendor left EEDPTagMode
3009 * flag unset in NVDATA.
3010 */
3011 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
3012 if (ioc->manu_pg11.EEDPTagMode == 0) {
3013 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
3014 ioc->name);
3015 ioc->manu_pg11.EEDPTagMode &= ~0x3;
3016 ioc->manu_pg11.EEDPTagMode |= 0x1;
3017 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
3018 &ioc->manu_pg11);
3019 }
3020
3021 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
3022 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
3023 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
3024 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
3025 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
Sreekanth Reddy2d8ce8c2015-01-12 11:38:56 +05303026 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303027 _base_display_ioc_capabilities(ioc);
3028
3029 /*
3030 * Enable task_set_full handling in iounit_pg1 when the
3031 * facts capabilities indicate that its supported.
3032 */
3033 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
3034 if ((ioc->facts.IOCCapabilities &
3035 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
3036 iounit_pg1_flags &=
3037 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
3038 else
3039 iounit_pg1_flags |=
3040 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
3041 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
3042 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
Sreekanth Reddy2d8ce8c2015-01-12 11:38:56 +05303043
3044 if (ioc->iounit_pg8.NumSensors)
3045 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303046}
3047
3048/**
3049 * _base_release_memory_pools - release memory
3050 * @ioc: per adapter object
3051 *
3052 * Free memory allocated from _base_allocate_memory_pools.
3053 *
3054 * Return nothing.
3055 */
3056static void
3057_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3058{
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05303059 int i = 0;
3060 struct reply_post_struct *rps;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303061
3062 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3063 __func__));
3064
3065 if (ioc->request) {
3066 pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
3067 ioc->request, ioc->request_dma);
3068 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3069 "request_pool(0x%p): free\n",
3070 ioc->name, ioc->request));
3071 ioc->request = NULL;
3072 }
3073
3074 if (ioc->sense) {
3075 pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
3076 if (ioc->sense_dma_pool)
3077 pci_pool_destroy(ioc->sense_dma_pool);
3078 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3079 "sense_pool(0x%p): free\n",
3080 ioc->name, ioc->sense));
3081 ioc->sense = NULL;
3082 }
3083
3084 if (ioc->reply) {
3085 pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
3086 if (ioc->reply_dma_pool)
3087 pci_pool_destroy(ioc->reply_dma_pool);
3088 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3089 "reply_pool(0x%p): free\n",
3090 ioc->name, ioc->reply));
3091 ioc->reply = NULL;
3092 }
3093
3094 if (ioc->reply_free) {
3095 pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
3096 ioc->reply_free_dma);
3097 if (ioc->reply_free_dma_pool)
3098 pci_pool_destroy(ioc->reply_free_dma_pool);
3099 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3100 "reply_free_pool(0x%p): free\n",
3101 ioc->name, ioc->reply_free));
3102 ioc->reply_free = NULL;
3103 }
3104
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05303105 if (ioc->reply_post) {
3106 do {
3107 rps = &ioc->reply_post[i];
3108 if (rps->reply_post_free) {
3109 pci_pool_free(
3110 ioc->reply_post_free_dma_pool,
3111 rps->reply_post_free,
3112 rps->reply_post_free_dma);
3113 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3114 "reply_post_free_pool(0x%p): free\n",
3115 ioc->name, rps->reply_post_free));
3116 rps->reply_post_free = NULL;
3117 }
3118 } while (ioc->rdpq_array_enable &&
3119 (++i < ioc->reply_queue_count));
3120
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303121 if (ioc->reply_post_free_dma_pool)
3122 pci_pool_destroy(ioc->reply_post_free_dma_pool);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05303123 kfree(ioc->reply_post);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303124 }
3125
3126 if (ioc->config_page) {
3127 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3128 "config_page(0x%p): free\n", ioc->name,
3129 ioc->config_page));
3130 pci_free_consistent(ioc->pdev, ioc->config_page_sz,
3131 ioc->config_page, ioc->config_page_dma);
3132 }
3133
3134 if (ioc->scsi_lookup) {
3135 free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
3136 ioc->scsi_lookup = NULL;
3137 }
3138 kfree(ioc->hpr_lookup);
3139 kfree(ioc->internal_lookup);
3140 if (ioc->chain_lookup) {
3141 for (i = 0; i < ioc->chain_depth; i++) {
3142 if (ioc->chain_lookup[i].chain_buffer)
3143 pci_pool_free(ioc->chain_dma_pool,
3144 ioc->chain_lookup[i].chain_buffer,
3145 ioc->chain_lookup[i].chain_buffer_dma);
3146 }
3147 if (ioc->chain_dma_pool)
3148 pci_pool_destroy(ioc->chain_dma_pool);
3149 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
3150 ioc->chain_lookup = NULL;
3151 }
3152}
3153
3154/**
3155 * _base_allocate_memory_pools - allocate start of day memory pools
3156 * @ioc: per adapter object
3157 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3158 *
3159 * Returns 0 success, anything else error
3160 */
3161static int
3162_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3163{
3164 struct mpt3sas_facts *facts;
3165 u16 max_sge_elements;
3166 u16 chains_needed_per_io;
3167 u32 sz, total_sz, reply_post_free_sz;
3168 u32 retry_sz;
3169 u16 max_request_credit;
3170 unsigned short sg_tablesize;
3171 u16 sge_size;
3172 int i;
3173
3174 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3175 __func__));
3176
3177
3178 retry_sz = 0;
3179 facts = &ioc->facts;
3180
3181 /* command line tunables for max sgl entries */
3182 if (max_sgl_entries != -1)
3183 sg_tablesize = max_sgl_entries;
Sreekanth Reddy471ef9d2015-11-11 17:30:24 +05303184 else {
3185 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
3186 sg_tablesize = MPT2SAS_SG_DEPTH;
3187 else
3188 sg_tablesize = MPT3SAS_SG_DEPTH;
3189 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303190
Sreekanth Reddy8a7e4c22015-11-11 17:30:18 +05303191 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
3192 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
3193 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
Sreekanth Reddyad666a02015-01-12 11:39:00 +05303194 sg_tablesize = min_t(unsigned short, sg_tablesize,
3195 SCSI_MAX_SG_CHAIN_SEGMENTS);
3196 pr_warn(MPT3SAS_FMT
3197 "sg_tablesize(%u) is bigger than kernel"
3198 " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
Sreekanth Reddy8a7e4c22015-11-11 17:30:18 +05303199 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
Sreekanth Reddyad666a02015-01-12 11:39:00 +05303200 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303201 ioc->shost->sg_tablesize = sg_tablesize;
3202
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303203 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
3204 (facts->RequestCredit / 4));
3205 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
3206 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
3207 INTERNAL_SCSIIO_CMDS_COUNT)) {
 3208			pr_err(MPT3SAS_FMT "IOC doesn't have enough Request"
 3209			    " Credits, it has just %d credits\n",
3210 ioc->name, facts->RequestCredit);
3211 return -ENOMEM;
3212 }
3213 ioc->internal_depth = 10;
3214 }
3215
3216 ioc->hi_priority_depth = ioc->internal_depth - (5);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303217 /* command line tunables for max controller queue depth */
3218 if (max_queue_depth != -1 && max_queue_depth != 0) {
3219 max_request_credit = min_t(u16, max_queue_depth +
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303220 ioc->internal_depth, facts->RequestCredit);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303221 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
3222 max_request_credit = MAX_HBA_QUEUE_DEPTH;
3223 } else
3224 max_request_credit = min_t(u16, facts->RequestCredit,
3225 MAX_HBA_QUEUE_DEPTH);
3226
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303227 /* Firmware maintains additional facts->HighPriorityCredit number of
 3228 * credits for High Priority Request messages, so hba queue depth will be
3229 * sum of max_request_credit and high priority queue depth.
3230 */
3231 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303232
3233 /* request frame size */
3234 ioc->request_sz = facts->IOCRequestFrameSize * 4;
3235
3236 /* reply frame size */
3237 ioc->reply_sz = facts->ReplyFrameSize * 4;
3238
3239 /* calculate the max scatter element size */
3240 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
3241
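	/* The sizing below depends on hba_queue_depth; if the contiguous
	 * request pool allocation further down fails, hba_queue_depth is
	 * reduced and the calculation restarts from this label.
	 */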
3242 retry_allocation:
3243 total_sz = 0;
3244 /* calculate number of sg elements left over in the 1st frame */
3245 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
3246 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
3247 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
3248
3249 /* now do the same for a chain buffer */
3250 max_sge_elements = ioc->request_sz - sge_size;
3251 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
3252
3253 /*
3254 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
3255 */
3256 chains_needed_per_io = ((ioc->shost->sg_tablesize -
3257 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
3258 + 1;
3259 if (chains_needed_per_io > facts->MaxChainDepth) {
3260 chains_needed_per_io = facts->MaxChainDepth;
3261 ioc->shost->sg_tablesize = min_t(u16,
3262 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
3263 * chains_needed_per_io), ioc->shost->sg_tablesize);
3264 }
3265 ioc->chains_needed_per_io = chains_needed_per_io;
3266
3267 /* reply free queue sizing - taking into account for 64 FW events */
3268 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3269
3270 /* calculate reply descriptor post queue depth */
3271 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
3272 ioc->reply_free_queue_depth + 1 ;
3273 /* align the reply post queue on the next 16 count boundary */
3274 if (ioc->reply_post_queue_depth % 16)
3275 ioc->reply_post_queue_depth += 16 -
3276 (ioc->reply_post_queue_depth % 16);
3277
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303278 if (ioc->reply_post_queue_depth >
3279 facts->MaxReplyDescriptorPostQueueDepth) {
3280 ioc->reply_post_queue_depth =
3281 facts->MaxReplyDescriptorPostQueueDepth -
3282 (facts->MaxReplyDescriptorPostQueueDepth % 16);
3283 ioc->hba_queue_depth =
3284 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
3285 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3286 }
3287
3288 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
3289 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
3290 "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
3291 ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
3292 ioc->chains_needed_per_io));
3293
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05303294 /* reply post queue, 16 byte align */
3295 reply_post_free_sz = ioc->reply_post_queue_depth *
3296 sizeof(Mpi2DefaultReplyDescriptor_t);
3297
3298 sz = reply_post_free_sz;
3299 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
3300 sz *= ioc->reply_queue_count;
3301
3302 ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
3303 (ioc->reply_queue_count):1,
3304 sizeof(struct reply_post_struct), GFP_KERNEL);
3305
3306 if (!ioc->reply_post) {
3307 pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
3308 ioc->name);
3309 goto out;
3310 }
3311 ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
3312 ioc->pdev, sz, 16, 0);
3313 if (!ioc->reply_post_free_dma_pool) {
3314 pr_err(MPT3SAS_FMT
3315 "reply_post_free pool: pci_pool_create failed\n",
3316 ioc->name);
3317 goto out;
3318 }
3319 i = 0;
3320 do {
3321 ioc->reply_post[i].reply_post_free =
3322 pci_pool_alloc(ioc->reply_post_free_dma_pool,
3323 GFP_KERNEL,
3324 &ioc->reply_post[i].reply_post_free_dma);
3325 if (!ioc->reply_post[i].reply_post_free) {
3326 pr_err(MPT3SAS_FMT
3327 "reply_post_free pool: pci_pool_alloc failed\n",
3328 ioc->name);
3329 goto out;
3330 }
3331 memset(ioc->reply_post[i].reply_post_free, 0, sz);
3332 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3333 "reply post free pool (0x%p): depth(%d),"
3334 "element_size(%d), pool_size(%d kB)\n", ioc->name,
3335 ioc->reply_post[i].reply_post_free,
3336 ioc->reply_post_queue_depth, 8, sz/1024));
3337 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3338 "reply_post_free_dma = (0x%llx)\n", ioc->name,
3339 (unsigned long long)
3340 ioc->reply_post[i].reply_post_free_dma));
3341 total_sz += sz;
3342 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
3343
3344 if (ioc->dma_mask == 64) {
3345 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
3346 pr_warn(MPT3SAS_FMT
3347 "no suitable consistent DMA mask for %s\n",
3348 ioc->name, pci_name(ioc->pdev));
3349 goto out;
3350 }
3351 }
3352
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303353 ioc->scsiio_depth = ioc->hba_queue_depth -
3354 ioc->hi_priority_depth - ioc->internal_depth;
3355
3356	/* set the scsi host can_queue depth, leaving headroom for the
3357	 * internal commands that could be outstanding
3358	 */
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303359 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303360 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3361 "scsi host: can_queue depth (%d)\n",
3362 ioc->name, ioc->shost->can_queue));
3363
3364
3365	/* contiguous pool for request and chains, 16 byte align, one extra
3366	 * frame for smid=0
3367	 */
3368 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
3369 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
3370
3371 /* hi-priority queue */
3372 sz += (ioc->hi_priority_depth * ioc->request_sz);
3373
3374 /* internal queue */
3375 sz += (ioc->internal_depth * ioc->request_sz);
3376
3377 ioc->request_dma_sz = sz;
3378 ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
3379 if (!ioc->request) {
3380 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3381 "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3382 "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
3383 ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3384 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
3385 goto out;
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303386 retry_sz = 64;
3387 ioc->hba_queue_depth -= retry_sz;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303388 goto retry_allocation;
3389 }
3390
3391 if (retry_sz)
3392 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3393	    "succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3394	    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
3395 ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3396
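	/*
	 * Carve the single DMA-coherent request allocation into three
	 * regions: (scsiio_depth + 1) SCSI IO frames (frame 0 is reserved
	 * for smid = 0), followed by the hi-priority frames, followed by
	 * the internal command frames.
	 */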
3397 /* hi-priority queue */
3398 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
3399 ioc->request_sz);
3400 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
3401 ioc->request_sz);
3402
3403 /* internal queue */
3404 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
3405 ioc->request_sz);
3406 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
3407 ioc->request_sz);
3408
3409 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3410 "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3411 ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
3412 (ioc->hba_queue_depth * ioc->request_sz)/1024));
3413
3414 dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
3415 ioc->name, (unsigned long long) ioc->request_dma));
3416 total_sz += sz;
3417
3418 sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
3419 ioc->scsi_lookup_pages = get_order(sz);
3420 ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
3421 GFP_KERNEL, ioc->scsi_lookup_pages);
3422 if (!ioc->scsi_lookup) {
3423 pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
3424 ioc->name, (int)sz);
3425 goto out;
3426 }
3427
3428 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
3429 ioc->name, ioc->request, ioc->scsiio_depth));
3430
3431 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
3432 sz = ioc->chain_depth * sizeof(struct chain_tracker);
3433 ioc->chain_pages = get_order(sz);
3434 ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
3435 GFP_KERNEL, ioc->chain_pages);
3436 if (!ioc->chain_lookup) {
3437 pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
3438 ioc->name);
3439 goto out;
3440 }
3441 ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
3442 ioc->request_sz, 16, 0);
3443 if (!ioc->chain_dma_pool) {
3444 pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
3445 ioc->name);
3446 goto out;
3447 }
3448 for (i = 0; i < ioc->chain_depth; i++) {
3449 ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
3450 ioc->chain_dma_pool , GFP_KERNEL,
3451 &ioc->chain_lookup[i].chain_buffer_dma);
3452 if (!ioc->chain_lookup[i].chain_buffer) {
3453 ioc->chain_depth = i;
3454 goto chain_done;
3455 }
3456 total_sz += ioc->request_sz;
3457 }
3458 chain_done:
3459 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3460 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
3461 ioc->name, ioc->chain_depth, ioc->request_sz,
3462 ((ioc->chain_depth * ioc->request_sz))/1024));
3463
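	/*
	 * SMIDs are handed out in three contiguous ranges: 1..scsiio_depth
	 * for SCSI IO requests, hi_priority_depth SMIDs immediately after
	 * for hi-priority requests, then internal_depth SMIDs for internal
	 * commands.
	 */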
3464 /* initialize hi-priority queue smid's */
3465 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
3466 sizeof(struct request_tracker), GFP_KERNEL);
3467 if (!ioc->hpr_lookup) {
3468 pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
3469 ioc->name);
3470 goto out;
3471 }
3472 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
3473 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3474 "hi_priority(0x%p): depth(%d), start smid(%d)\n",
3475 ioc->name, ioc->hi_priority,
3476 ioc->hi_priority_depth, ioc->hi_priority_smid));
3477
3478 /* initialize internal queue smid's */
3479 ioc->internal_lookup = kcalloc(ioc->internal_depth,
3480 sizeof(struct request_tracker), GFP_KERNEL);
3481 if (!ioc->internal_lookup) {
3482 pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
3483 ioc->name);
3484 goto out;
3485 }
3486 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
3487 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3488 "internal(0x%p): depth(%d), start smid(%d)\n",
3489 ioc->name, ioc->internal,
3490 ioc->internal_depth, ioc->internal_smid));
3491
3492 /* sense buffers, 4 byte align */
3493 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
3494 ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
3495 0);
3496 if (!ioc->sense_dma_pool) {
3497 pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
3498 ioc->name);
3499 goto out;
3500 }
3501 ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
3502 &ioc->sense_dma);
3503 if (!ioc->sense) {
3504 pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
3505 ioc->name);
3506 goto out;
3507 }
3508 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3509 "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
3510 "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
3511 SCSI_SENSE_BUFFERSIZE, sz/1024));
3512 dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
3513 ioc->name, (unsigned long long)ioc->sense_dma));
3514 total_sz += sz;
3515
3516 /* reply pool, 4 byte align */
3517 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
3518 ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
3519 0);
3520 if (!ioc->reply_dma_pool) {
3521 pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
3522 ioc->name);
3523 goto out;
3524 }
3525 ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
3526 &ioc->reply_dma);
3527 if (!ioc->reply) {
3528 pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
3529 ioc->name);
3530 goto out;
3531 }
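	/*
	 * Record the 32-bit physical window of the reply pool; reply frame
	 * addresses handed back by the firmware are sanity-checked against
	 * this window before being translated to virtual addresses.
	 */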
3532 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
3533 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
3534 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3535 "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3536 ioc->name, ioc->reply,
3537 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
3538 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
3539 ioc->name, (unsigned long long)ioc->reply_dma));
3540 total_sz += sz;
3541
3542 /* reply free queue, 16 byte align */
3543 sz = ioc->reply_free_queue_depth * 4;
3544 ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
3545 ioc->pdev, sz, 16, 0);
3546 if (!ioc->reply_free_dma_pool) {
3547 pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
3548 ioc->name);
3549 goto out;
3550 }
3551 ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
3552 &ioc->reply_free_dma);
3553 if (!ioc->reply_free) {
3554 pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
3555 ioc->name);
3556 goto out;
3557 }
3558 memset(ioc->reply_free, 0, sz);
3559 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
3560 "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
3561 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
3562 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3563 "reply_free_dma (0x%llx)\n",
3564 ioc->name, (unsigned long long)ioc->reply_free_dma));
3565 total_sz += sz;
3566
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303567 ioc->config_page_sz = 512;
3568 ioc->config_page = pci_alloc_consistent(ioc->pdev,
3569 ioc->config_page_sz, &ioc->config_page_dma);
3570 if (!ioc->config_page) {
3571 pr_err(MPT3SAS_FMT
3572		    "config page: pci_alloc_consistent failed\n",
3573 ioc->name);
3574 goto out;
3575 }
3576 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3577 "config page(0x%p): size(%d)\n",
3578 ioc->name, ioc->config_page, ioc->config_page_sz));
3579 dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
3580 ioc->name, (unsigned long long)ioc->config_page_dma));
3581 total_sz += ioc->config_page_sz;
3582
3583 pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
3584 ioc->name, total_sz/1024);
3585 pr_info(MPT3SAS_FMT
3586 "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
3587 ioc->name, ioc->shost->can_queue, facts->RequestCredit);
3588 pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
3589 ioc->name, ioc->shost->sg_tablesize);
3590 return 0;
3591
3592 out:
3593 return -ENOMEM;
3594}
3595
3596/**
3597 * mpt3sas_base_get_iocstate - Get the current state of an MPT adapter.
3598 * @ioc: Pointer to MPT3SAS_ADAPTER structure
3599 * @cooked: Request raw or cooked IOC state
3600 *
3601 * Returns all IOC Doorbell register bits if cooked==0, else just the
3602 * Doorbell bits in MPI2_IOC_STATE_MASK.
3603 */
3604u32
3605mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
3606{
3607 u32 s, sc;
3608
3609 s = readl(&ioc->chip->Doorbell);
3610 sc = s & MPI2_IOC_STATE_MASK;
3611 return cooked ? sc : s;
3612}
3613
3614/**
3615 * _base_wait_on_iocstate - waiting on a particular ioc state
3616 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
3617 * @timeout: timeout in seconds
3618 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3619 *
3620 * Returns 0 for success, non-zero for failure.
3621 */
3622static int
3623_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
3624 int sleep_flag)
3625{
3626 u32 count, cntdn;
3627 u32 current_state;
3628
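	/*
	 * Poll roughly every 1 ms when sleeping is allowed and every 500 us
	 * otherwise, so cntdn converts the timeout given in seconds into
	 * the matching number of polling iterations.
	 */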
3629 count = 0;
3630 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3631 do {
3632 current_state = mpt3sas_base_get_iocstate(ioc, 1);
3633 if (current_state == ioc_state)
3634 return 0;
3635 if (count && current_state == MPI2_IOC_STATE_FAULT)
3636 break;
3637 if (sleep_flag == CAN_SLEEP)
3638 usleep_range(1000, 1500);
3639 else
3640 udelay(500);
3641 count++;
3642 } while (--cntdn);
3643
3644 return current_state;
3645}
3646
3647/**
3648 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
3649 * a write to the doorbell)
3650 * @ioc: per adapter object
3651 * @timeout: timeout in seconds
3652 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3653 *
3654 * Returns 0 for success, non-zero for failure.
3655 *
3656 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3657 */
3658static int
Sreekanth Reddy4dc8c802015-06-30 12:24:48 +05303659_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
3660
3661static int
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303662_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3663 int sleep_flag)
3664{
3665 u32 cntdn, count;
3666 u32 int_status;
3667
3668 count = 0;
3669 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3670 do {
3671 int_status = readl(&ioc->chip->HostInterruptStatus);
3672 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3673 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3674 "%s: successful count(%d), timeout(%d)\n",
3675 ioc->name, __func__, count, timeout));
3676 return 0;
3677 }
3678 if (sleep_flag == CAN_SLEEP)
3679 usleep_range(1000, 1500);
3680 else
3681 udelay(500);
3682 count++;
3683 } while (--cntdn);
3684
3685 pr_err(MPT3SAS_FMT
3686 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3687 ioc->name, __func__, count, int_status);
3688 return -EFAULT;
3689}
3690
3691/**
3692 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3693 * @ioc: per adapter object
3694 * @timeout: timeout in seconds
3695 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3696 *
3697 * Returns 0 for success, non-zero for failure.
3698 *
3699 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3700 * doorbell.
3701 */
3702static int
3703_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
3704 int sleep_flag)
3705{
3706 u32 cntdn, count;
3707 u32 int_status;
3708 u32 doorbell;
3709
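	/*
	 * The ack is signalled by the IOC clearing MPI2_HIS_SYS2IOC_DB_STATUS.
	 * While polling, also bail out if the IOC reports a fault through the
	 * doorbell, or if the register reads back as all ones, which suggests
	 * the device is no longer responding.
	 */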
3710 count = 0;
3711 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3712 do {
3713 int_status = readl(&ioc->chip->HostInterruptStatus);
3714 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
3715 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3716 "%s: successful count(%d), timeout(%d)\n",
3717 ioc->name, __func__, count, timeout));
3718 return 0;
3719 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3720 doorbell = readl(&ioc->chip->Doorbell);
3721 if ((doorbell & MPI2_IOC_STATE_MASK) ==
3722 MPI2_IOC_STATE_FAULT) {
3723 mpt3sas_base_fault_info(ioc , doorbell);
3724 return -EFAULT;
3725 }
3726 } else if (int_status == 0xFFFFFFFF)
3727 goto out;
3728
3729 if (sleep_flag == CAN_SLEEP)
3730 usleep_range(1000, 1500);
3731 else
3732 udelay(500);
3733 count++;
3734 } while (--cntdn);
3735
3736 out:
3737 pr_err(MPT3SAS_FMT
3738 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3739 ioc->name, __func__, count, int_status);
3740 return -EFAULT;
3741}
3742
3743/**
3744 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3745 * @ioc: per adapter object
3746 * @timeout: timeout in seconds
3747 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3748 *
3749 * Returns 0 for success, non-zero for failure.
3750 *
3751 */
3752static int
3753_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
3754 int sleep_flag)
3755{
3756 u32 cntdn, count;
3757 u32 doorbell_reg;
3758
3759 count = 0;
3760 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3761 do {
3762 doorbell_reg = readl(&ioc->chip->Doorbell);
3763 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3764 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3765 "%s: successful count(%d), timeout(%d)\n",
3766 ioc->name, __func__, count, timeout));
3767 return 0;
3768 }
3769 if (sleep_flag == CAN_SLEEP)
3770 usleep_range(1000, 1500);
3771 else
3772 udelay(500);
3773 count++;
3774 } while (--cntdn);
3775
3776 pr_err(MPT3SAS_FMT
3777 "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
3778 ioc->name, __func__, count, doorbell_reg);
3779 return -EFAULT;
3780}
3781
3782/**
3783 * _base_send_ioc_reset - send doorbell reset
3784 * @ioc: per adapter object
3785 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3786 * @timeout: timeout in second
3787 * @timeout: timeout in seconds
3788 *
3789 * Returns 0 for success, non-zero for failure.
3790 */
3791static int
3792_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
3793 int sleep_flag)
3794{
3795 u32 ioc_state;
3796 int r = 0;
3797
3798 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
3799 pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
3800 ioc->name, __func__);
3801 return -EFAULT;
3802 }
3803
3804 if (!(ioc->facts.IOCCapabilities &
3805 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3806 return -EFAULT;
3807
3808 pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
3809
3810 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3811 &ioc->chip->Doorbell);
3812 if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
3813 r = -EFAULT;
3814 goto out;
3815 }
3816 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3817 timeout, sleep_flag);
3818 if (ioc_state) {
3819 pr_err(MPT3SAS_FMT
3820 "%s: failed going to ready state (ioc_state=0x%x)\n",
3821 ioc->name, __func__, ioc_state);
3822 r = -EFAULT;
3823 goto out;
3824 }
3825 out:
3826 pr_info(MPT3SAS_FMT "message unit reset: %s\n",
3827 ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3828 return r;
3829}
3830
3831/**
3832 * _base_handshake_req_reply_wait - send request thru doorbell interface
3833 * @ioc: per adapter object
3834 * @request_bytes: request length
3835 * @request: pointer having request payload
3836 * @reply_bytes: reply length
3837 * @reply: pointer to reply payload
3838 * @timeout: timeout in second
3839 * @timeout: timeout in seconds
3840 *
3841 * Returns 0 for success, non-zero for failure.
3842 */
3843static int
3844_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3845 u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3846{
3847 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3848 int i;
3849 u8 failed;
3850 u16 dummy;
3851 __le32 *mfp;
3852
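	/*
	 * Doorbell handshake sequence: post the HANDSHAKE function together
	 * with the request length, wait for the IOC to acknowledge, stream
	 * the request one dword at a time through the doorbell, then pull
	 * the reply back 16 bits at a time, clearing the host interrupt
	 * status after each word.
	 */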
3853 /* make sure doorbell is not in use */
3854 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3855 pr_err(MPT3SAS_FMT
3856 "doorbell is in use (line=%d)\n",
3857 ioc->name, __LINE__);
3858 return -EFAULT;
3859 }
3860
3861 /* clear pending doorbell interrupts from previous state changes */
3862 if (readl(&ioc->chip->HostInterruptStatus) &
3863 MPI2_HIS_IOC2SYS_DB_STATUS)
3864 writel(0, &ioc->chip->HostInterruptStatus);
3865
3866 /* send message to ioc */
3867 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3868 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3869 &ioc->chip->Doorbell);
3870
3871 if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3872 pr_err(MPT3SAS_FMT
3873 "doorbell handshake int failed (line=%d)\n",
3874 ioc->name, __LINE__);
3875 return -EFAULT;
3876 }
3877 writel(0, &ioc->chip->HostInterruptStatus);
3878
3879 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3880 pr_err(MPT3SAS_FMT
3881 "doorbell handshake ack failed (line=%d)\n",
3882 ioc->name, __LINE__);
3883 return -EFAULT;
3884 }
3885
3886 /* send message 32-bits at a time */
3887 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3888 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3889 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3890 failed = 1;
3891 }
3892
3893 if (failed) {
3894 pr_err(MPT3SAS_FMT
3895 "doorbell handshake sending request failed (line=%d)\n",
3896 ioc->name, __LINE__);
3897 return -EFAULT;
3898 }
3899
3900 /* now wait for the reply */
3901 if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3902 pr_err(MPT3SAS_FMT
3903 "doorbell handshake int failed (line=%d)\n",
3904 ioc->name, __LINE__);
3905 return -EFAULT;
3906 }
3907
3908	/* read the first two 16-bit words; they hold the reply header, including the total reply length */
3909 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3910 & MPI2_DOORBELL_DATA_MASK);
3911 writel(0, &ioc->chip->HostInterruptStatus);
3912 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3913 pr_err(MPT3SAS_FMT
3914 "doorbell handshake int failed (line=%d)\n",
3915 ioc->name, __LINE__);
3916 return -EFAULT;
3917 }
3918 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3919 & MPI2_DOORBELL_DATA_MASK);
3920 writel(0, &ioc->chip->HostInterruptStatus);
3921
3922 for (i = 2; i < default_reply->MsgLength * 2; i++) {
3923 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3924 pr_err(MPT3SAS_FMT
3925 "doorbell handshake int failed (line=%d)\n",
3926 ioc->name, __LINE__);
3927 return -EFAULT;
3928 }
3929 if (i >= reply_bytes/2) /* overflow case */
3930 dummy = readl(&ioc->chip->Doorbell);
3931 else
3932 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3933 & MPI2_DOORBELL_DATA_MASK);
3934 writel(0, &ioc->chip->HostInterruptStatus);
3935 }
3936
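	/*
	 * Consume any final doorbell interrupt and wait for the IOC to
	 * release the doorbell before clearing the interrupt status one
	 * last time.
	 */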
3937 _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3938 if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3939 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3940 "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
3941 }
3942 writel(0, &ioc->chip->HostInterruptStatus);
3943
3944 if (ioc->logging_level & MPT_DEBUG_INIT) {
3945 mfp = (__le32 *)reply;
3946 pr_info("\toffset:data\n");
3947 for (i = 0; i < reply_bytes/4; i++)
3948 pr_info("\t[0x%02x]:%08x\n", i*4,
3949 le32_to_cpu(mfp[i]));
3950 }
3951 return 0;
3952}
3953
3954/**
3955 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
3956 * @ioc: per adapter object
3957 * @mpi_reply: the reply payload from FW
3958 * @mpi_request: the request payload sent to FW
3959 *
3960 * The SAS IO Unit Control Request message allows the host to perform
3961 * low-level operations, such as resets on the PHYs of the IO Unit. It also
3962 * allows the host to obtain the IOC-assigned device handle for a device, if
3963 * it has other identifying information about the device, and to remove IOC
3964 * resources associated with the device.
3965 *
3966 * Returns 0 for success, non-zero for failure.
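 * A minimal, hypothetical caller sketch (phy_number and the error handling
 * are illustrative only, not taken from this driver):
 *
 *	Mpi2SasIoUnitControlRequest_t mpi_request;
 *	Mpi2SasIoUnitControlReply_t mpi_reply;
 *
 *	memset(&mpi_request, 0, sizeof(mpi_request));
 *	mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 *	mpi_request.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
 *	mpi_request.PhyNum = phy_number;
 *	if (mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))
 *		pr_err("phy %d: hard reset request failed\n", phy_number);
 *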
3967 */
3968int
3969mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
3970 Mpi2SasIoUnitControlReply_t *mpi_reply,
3971 Mpi2SasIoUnitControlRequest_t *mpi_request)
3972{
3973 u16 smid;
3974 u32 ioc_state;
3975 unsigned long timeleft;
Dan Carpentereb445522014-12-04 13:57:05 +03003976 bool issue_reset = false;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303977 int rc;
3978 void *request;
3979 u16 wait_state_count;
3980
3981 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3982 __func__));
3983
3984 mutex_lock(&ioc->base_cmds.mutex);
3985
3986 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
3987 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
3988 ioc->name, __func__);
3989 rc = -EAGAIN;
3990 goto out;
3991 }
3992
3993 wait_state_count = 0;
3994 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3995 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3996 if (wait_state_count++ == 10) {
3997 pr_err(MPT3SAS_FMT
3998 "%s: failed due to ioc not operational\n",
3999 ioc->name, __func__);
4000 rc = -EFAULT;
4001 goto out;
4002 }
4003 ssleep(1);
4004 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4005 pr_info(MPT3SAS_FMT
4006 "%s: waiting for operational state(count=%d)\n",
4007 ioc->name, __func__, wait_state_count);
4008 }
4009
4010 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4011 if (!smid) {
4012 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4013 ioc->name, __func__);
4014 rc = -EAGAIN;
4015 goto out;
4016 }
4017
4018 rc = 0;
4019 ioc->base_cmds.status = MPT3_CMD_PENDING;
4020 request = mpt3sas_base_get_msg_frame(ioc, smid);
4021 ioc->base_cmds.smid = smid;
4022 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
4023 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
4024 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
4025 ioc->ioc_link_reset_in_progress = 1;
4026 init_completion(&ioc->base_cmds.done);
4027 mpt3sas_base_put_smid_default(ioc, smid);
4028 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
4029 msecs_to_jiffies(10000));
4030 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
4031 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
4032 ioc->ioc_link_reset_in_progress)
4033 ioc->ioc_link_reset_in_progress = 0;
4034 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4035 pr_err(MPT3SAS_FMT "%s: timeout\n",
4036 ioc->name, __func__);
4037 _debug_dump_mf(mpi_request,
4038 sizeof(Mpi2SasIoUnitControlRequest_t)/4);
4039 if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
Dan Carpentereb445522014-12-04 13:57:05 +03004040 issue_reset = true;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304041 goto issue_host_reset;
4042 }
4043 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
4044 memcpy(mpi_reply, ioc->base_cmds.reply,
4045 sizeof(Mpi2SasIoUnitControlReply_t));
4046 else
4047 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
4048 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4049 goto out;
4050
4051 issue_host_reset:
4052 if (issue_reset)
4053 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
4054 FORCE_BIG_HAMMER);
4055 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4056 rc = -EFAULT;
4057 out:
4058 mutex_unlock(&ioc->base_cmds.mutex);
4059 return rc;
4060}
4061
4062/**
4063 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
4064 * @ioc: per adapter object
4065 * @mpi_reply: the reply payload from FW
4066 * @mpi_request: the request payload sent to FW
4067 *
4068 * The SCSI Enclosure Processor request message causes the IOC to
4069 * communicate with SES devices to control LED status signals.
4070 *
4071 * Returns 0 for success, non-zero for failure.
4072 */
4073int
4074mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4075 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
4076{
4077 u16 smid;
4078 u32 ioc_state;
4079 unsigned long timeleft;
Dan Carpentereb445522014-12-04 13:57:05 +03004080 bool issue_reset = false;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304081 int rc;
4082 void *request;
4083 u16 wait_state_count;
4084
4085 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4086 __func__));
4087
4088 mutex_lock(&ioc->base_cmds.mutex);
4089
4090 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
4091 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
4092 ioc->name, __func__);
4093 rc = -EAGAIN;
4094 goto out;
4095 }
4096
4097 wait_state_count = 0;
4098 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4099 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4100 if (wait_state_count++ == 10) {
4101 pr_err(MPT3SAS_FMT
4102 "%s: failed due to ioc not operational\n",
4103 ioc->name, __func__);
4104 rc = -EFAULT;
4105 goto out;
4106 }
4107 ssleep(1);
4108 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4109 pr_info(MPT3SAS_FMT
4110 "%s: waiting for operational state(count=%d)\n",
4111 ioc->name,
4112 __func__, wait_state_count);
4113 }
4114
4115 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4116 if (!smid) {
4117 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4118 ioc->name, __func__);
4119 rc = -EAGAIN;
4120 goto out;
4121 }
4122
4123 rc = 0;
4124 ioc->base_cmds.status = MPT3_CMD_PENDING;
4125 request = mpt3sas_base_get_msg_frame(ioc, smid);
4126 ioc->base_cmds.smid = smid;
4127	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
4128 init_completion(&ioc->base_cmds.done);
4129 mpt3sas_base_put_smid_default(ioc, smid);
4130 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
4131 msecs_to_jiffies(10000));
4132 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4133 pr_err(MPT3SAS_FMT "%s: timeout\n",
4134 ioc->name, __func__);
4135 _debug_dump_mf(mpi_request,
4136 sizeof(Mpi2SepRequest_t)/4);
4137 if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
Dan Carpentereb445522014-12-04 13:57:05 +03004138			issue_reset = true;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304139 goto issue_host_reset;
4140 }
4141 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
4142 memcpy(mpi_reply, ioc->base_cmds.reply,
4143 sizeof(Mpi2SepReply_t));
4144 else
4145 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
4146 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4147 goto out;
4148
4149 issue_host_reset:
4150 if (issue_reset)
4151 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
4152 FORCE_BIG_HAMMER);
4153 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4154 rc = -EFAULT;
4155 out:
4156 mutex_unlock(&ioc->base_cmds.mutex);
4157 return rc;
4158}
4159
4160/**
4161 * _base_get_port_facts - obtain port facts reply and save in ioc
4162 * @ioc: per adapter object
 * @port: port number to obtain facts for
4163 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4164 *
4165 * Returns 0 for success, non-zero for failure.
4166 */
4167static int
4168_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
4169{
4170 Mpi2PortFactsRequest_t mpi_request;
4171 Mpi2PortFactsReply_t mpi_reply;
4172 struct mpt3sas_port_facts *pfacts;
4173 int mpi_reply_sz, mpi_request_sz, r;
4174
4175 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4176 __func__));
4177
4178 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
4179 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
4180 memset(&mpi_request, 0, mpi_request_sz);
4181 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
4182 mpi_request.PortNumber = port;
4183 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4184 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
4185
4186 if (r != 0) {
4187 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4188 ioc->name, __func__, r);
4189 return r;
4190 }
4191
4192 pfacts = &ioc->pfacts[port];
4193 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
4194 pfacts->PortNumber = mpi_reply.PortNumber;
4195 pfacts->VP_ID = mpi_reply.VP_ID;
4196 pfacts->VF_ID = mpi_reply.VF_ID;
4197 pfacts->MaxPostedCmdBuffers =
4198 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
4199
4200 return 0;
4201}
4202
4203/**
Sreekanth Reddy4dc8c802015-06-30 12:24:48 +05304204 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
4205 * @ioc: per adapter object
4206 * @timeout: timeout in seconds
4207 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4208 *
4209 * Returns 0 for success, non-zero for failure.
4210 */
4211static int
4212_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
4213 int sleep_flag)
4214{
4215 u32 ioc_state;
4216 int rc;
4217
4218 dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
4219 __func__));
4220
4221 if (ioc->pci_error_recovery) {
4222 dfailprintk(ioc, printk(MPT3SAS_FMT
4223 "%s: host in pci error recovery\n", ioc->name, __func__));
4224 return -EFAULT;
4225 }
4226
4227 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4228 dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4229 ioc->name, __func__, ioc_state));
4230
4231 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
4232 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4233 return 0;
4234
4235 if (ioc_state & MPI2_DOORBELL_USED) {
4236 dhsprintk(ioc, printk(MPT3SAS_FMT
4237 "unexpected doorbell active!\n", ioc->name));
4238 goto issue_diag_reset;
4239 }
4240
4241 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4242 mpt3sas_base_fault_info(ioc, ioc_state &
4243 MPI2_DOORBELL_DATA_MASK);
4244 goto issue_diag_reset;
4245 }
4246
4247 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
4248 timeout, sleep_flag);
4249 if (ioc_state) {
4250 dfailprintk(ioc, printk(MPT3SAS_FMT
4251 "%s: failed going to ready state (ioc_state=0x%x)\n",
4252 ioc->name, __func__, ioc_state));
4253 return -EFAULT;
4254 }
4255
4256 issue_diag_reset:
4257 rc = _base_diag_reset(ioc, sleep_flag);
4258 return rc;
4259}
4260
4261/**
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304262 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
4263 * @ioc: per adapter object
4264 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4265 *
4266 * Returns 0 for success, non-zero for failure.
4267 */
4268static int
4269_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4270{
4271 Mpi2IOCFactsRequest_t mpi_request;
4272 Mpi2IOCFactsReply_t mpi_reply;
4273 struct mpt3sas_facts *facts;
4274 int mpi_reply_sz, mpi_request_sz, r;
4275
4276 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4277 __func__));
4278
Sreekanth Reddy4dc8c802015-06-30 12:24:48 +05304279 r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
4280 if (r) {
4281 dfailprintk(ioc, printk(MPT3SAS_FMT
4282 "%s: failed getting to correct state\n",
4283 ioc->name, __func__));
4284 return r;
4285 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304286 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
4287 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
4288 memset(&mpi_request, 0, mpi_request_sz);
4289 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
4290 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4291 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
4292
4293 if (r != 0) {
4294 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4295 ioc->name, __func__, r);
4296 return r;
4297 }
4298
4299 facts = &ioc->facts;
4300 memset(facts, 0, sizeof(struct mpt3sas_facts));
4301 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
4302 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
4303 facts->VP_ID = mpi_reply.VP_ID;
4304 facts->VF_ID = mpi_reply.VF_ID;
4305 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
4306 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
4307 facts->WhoInit = mpi_reply.WhoInit;
4308 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
4309 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
4310 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
4311 facts->MaxReplyDescriptorPostQueueDepth =
4312 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
4313 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
4314 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
4315 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
4316 ioc->ir_firmware = 1;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304317 if ((facts->IOCCapabilities &
4318 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
4319 ioc->rdpq_array_capable = 1;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304320 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
4321 facts->IOCRequestFrameSize =
4322 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
4323 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
4324 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
4325 ioc->shost->max_id = -1;
4326 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
4327 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
4328 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
4329 facts->HighPriorityCredit =
4330 le16_to_cpu(mpi_reply.HighPriorityCredit);
4331 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
4332 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
4333
4334 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4335 "hba queue depth(%d), max chains per io(%d)\n",
4336 ioc->name, facts->RequestCredit,
4337 facts->MaxChainDepth));
4338 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4339 "request frame size(%d), reply frame size(%d)\n", ioc->name,
4340 facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
4341 return 0;
4342}
4343
4344/**
4345 * _base_send_ioc_init - send ioc_init to firmware
4346 * @ioc: per adapter object
4347 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4348 *
4349 * Returns 0 for success, non-zero for failure.
4350 */
4351static int
4352_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4353{
4354 Mpi2IOCInitRequest_t mpi_request;
4355 Mpi2IOCInitReply_t mpi_reply;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304356 int i, r = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304357 struct timeval current_time;
4358 u16 ioc_status;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304359 u32 reply_post_free_array_sz = 0;
4360 Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
4361 dma_addr_t reply_post_free_array_dma;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304362
4363 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4364 __func__));
4365
4366 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
4367 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
4368 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
4369 mpi_request.VF_ID = 0; /* TODO */
4370 mpi_request.VP_ID = 0;
Sreekanth Reddyd357e842015-11-11 17:30:22 +05304371 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304372 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
4373
4374 if (_base_is_controller_msix_enabled(ioc))
4375 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
4376 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
4377 mpi_request.ReplyDescriptorPostQueueDepth =
4378 cpu_to_le16(ioc->reply_post_queue_depth);
4379 mpi_request.ReplyFreeQueueDepth =
4380 cpu_to_le16(ioc->reply_free_queue_depth);
4381
4382 mpi_request.SenseBufferAddressHigh =
4383 cpu_to_le32((u64)ioc->sense_dma >> 32);
4384 mpi_request.SystemReplyAddressHigh =
4385 cpu_to_le32((u64)ioc->reply_dma >> 32);
4386 mpi_request.SystemRequestFrameBaseAddress =
4387 cpu_to_le64((u64)ioc->request_dma);
4388 mpi_request.ReplyFreeQueueAddress =
4389 cpu_to_le64((u64)ioc->reply_free_dma);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304390
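	/*
	 * In RDPQ mode the IOC_INIT request carries the DMA address of an
	 * array of Mpi2IOCInitRDPQArrayEntry entries, one per reply queue,
	 * instead of a single reply descriptor post queue address.
	 */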
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304391 if (ioc->rdpq_array_enable) {
4392 reply_post_free_array_sz = ioc->reply_queue_count *
4393 sizeof(Mpi2IOCInitRDPQArrayEntry);
4394 reply_post_free_array = pci_alloc_consistent(ioc->pdev,
4395 reply_post_free_array_sz, &reply_post_free_array_dma);
4396 if (!reply_post_free_array) {
4397 pr_err(MPT3SAS_FMT
4398 "reply_post_free_array: pci_alloc_consistent failed\n",
4399 ioc->name);
4400 r = -ENOMEM;
4401 goto out;
4402 }
4403 memset(reply_post_free_array, 0, reply_post_free_array_sz);
4404 for (i = 0; i < ioc->reply_queue_count; i++)
4405 reply_post_free_array[i].RDPQBaseAddress =
4406 cpu_to_le64(
4407 (u64)ioc->reply_post[i].reply_post_free_dma);
4408 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
4409 mpi_request.ReplyDescriptorPostQueueAddress =
4410 cpu_to_le64((u64)reply_post_free_array_dma);
4411 } else {
4412 mpi_request.ReplyDescriptorPostQueueAddress =
4413 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
4414 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304415
4416	/* This time stamp specifies the number of milliseconds
4417	 * since the epoch (midnight, January 1, 1970).
4418	 */
4419 do_gettimeofday(&current_time);
4420 mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
4421 (current_time.tv_usec / 1000));
4422
4423 if (ioc->logging_level & MPT_DEBUG_INIT) {
4424 __le32 *mfp;
4425 int i;
4426
4427 mfp = (__le32 *)&mpi_request;
4428 pr_info("\toffset:data\n");
4429 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
4430 pr_info("\t[0x%02x]:%08x\n", i*4,
4431 le32_to_cpu(mfp[i]));
4432 }
4433
4434 r = _base_handshake_req_reply_wait(ioc,
4435 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
4436 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
4437 sleep_flag);
4438
4439 if (r != 0) {
4440 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4441 ioc->name, __func__, r);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304442 goto out;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304443 }
4444
4445 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
4446 if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
4447 mpi_reply.IOCLogInfo) {
4448 pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
4449 r = -EIO;
4450 }
4451
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304452out:
4453 if (reply_post_free_array)
4454 pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
4455 reply_post_free_array,
4456 reply_post_free_array_dma);
4457 return r;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304458}
4459
4460/**
4461 * mpt3sas_port_enable_done - command completion routine for port enable
4462 * @ioc: per adapter object
4463 * @smid: system request message index
4464 * @msix_index: MSIX table index supplied by the OS
4465 * @reply: reply message frame(lower 32bit addr)
4466 *
4467 * Return 1 meaning mf should be freed from _base_interrupt
4468 * 0 means the mf is freed from this function.
4469 */
4470u8
4471mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4472 u32 reply)
4473{
4474 MPI2DefaultReply_t *mpi_reply;
4475 u16 ioc_status;
4476
4477 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
4478 return 1;
4479
4480 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
4481 if (!mpi_reply)
4482 return 1;
4483
4484 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
4485 return 1;
4486
4487 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
4488 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
4489 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
4490 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
4491 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4492 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
4493 ioc->port_enable_failed = 1;
4494
4495 if (ioc->is_driver_loading) {
4496 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4497 mpt3sas_port_enable_complete(ioc);
4498 return 1;
4499 } else {
4500 ioc->start_scan_failed = ioc_status;
4501 ioc->start_scan = 0;
4502 return 1;
4503 }
4504 }
4505 complete(&ioc->port_enable_cmds.done);
4506 return 1;
4507}
4508
4509/**
4510 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
4511 * @ioc: per adapter object
4512 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4513 *
4514 * Returns 0 for success, non-zero for failure.
4515 */
4516static int
4517_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4518{
4519 Mpi2PortEnableRequest_t *mpi_request;
4520 Mpi2PortEnableReply_t *mpi_reply;
4521 unsigned long timeleft;
4522 int r = 0;
4523 u16 smid;
4524 u16 ioc_status;
4525
4526 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4527
4528 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4529 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4530 ioc->name, __func__);
4531 return -EAGAIN;
4532 }
4533
4534 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4535 if (!smid) {
4536 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4537 ioc->name, __func__);
4538 return -EAGAIN;
4539 }
4540
4541 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4542 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4543 ioc->port_enable_cmds.smid = smid;
4544 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4545 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4546
4547 init_completion(&ioc->port_enable_cmds.done);
4548 mpt3sas_base_put_smid_default(ioc, smid);
4549 timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
4550 300*HZ);
4551 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
4552 pr_err(MPT3SAS_FMT "%s: timeout\n",
4553 ioc->name, __func__);
4554 _debug_dump_mf(mpi_request,
4555 sizeof(Mpi2PortEnableRequest_t)/4);
4556 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
4557 r = -EFAULT;
4558 else
4559 r = -ETIME;
4560 goto out;
4561 }
4562
4563 mpi_reply = ioc->port_enable_cmds.reply;
4564 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4565 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4566 pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
4567 ioc->name, __func__, ioc_status);
4568 r = -EFAULT;
4569 goto out;
4570 }
4571
4572 out:
4573 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
4574 pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
4575 "SUCCESS" : "FAILED"));
4576 return r;
4577}
4578
4579/**
4580 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
4581 * @ioc: per adapter object
4582 *
4583 * Returns 0 for success, non-zero for failure.
4584 */
4585int
4586mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
4587{
4588 Mpi2PortEnableRequest_t *mpi_request;
4589 u16 smid;
4590
4591 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4592
4593 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4594 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4595 ioc->name, __func__);
4596 return -EAGAIN;
4597 }
4598
4599 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4600 if (!smid) {
4601 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4602 ioc->name, __func__);
4603 return -EAGAIN;
4604 }
4605
4606 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4607 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4608 ioc->port_enable_cmds.smid = smid;
4609 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4610 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4611
4612 mpt3sas_base_put_smid_default(ioc, smid);
4613 return 0;
4614}
4615
4616/**
4617 * _base_determine_wait_on_discovery - discovery wait disposition
4618 * @ioc: per adapter object
4619 *
4620 * Decide whether to wait on discovery to complete. Used to either
4621 * locate boot device, or report volumes ahead of physical devices.
4622 *
4623 * Returns 1 for wait, 0 for don't wait
4624 */
4625static int
4626_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
4627{
4628	/* We wait for discovery to complete if IR firmware is loaded.
4629	 * The sas topology events arrive before PD events, so we need time to
4630	 * turn on the bit in ioc->pd_handles to indicate a PD.
4631	 * Also, it may be required to report volumes ahead of physical
4632	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
4633	 */
4634 if (ioc->ir_firmware)
4635 return 1;
4636
4637 /* if no Bios, then we don't need to wait */
4638 if (!ioc->bios_pg3.BiosVersion)
4639 return 0;
4640
4641	/* The BIOS is present, so we drop down here.
4642	 *
4643	 * If there are any entries in BIOS Page 2, then we wait
4644	 * for discovery to complete.
4645	 */
4646
4647 /* Current Boot Device */
4648 if ((ioc->bios_pg2.CurrentBootDeviceForm &
4649 MPI2_BIOSPAGE2_FORM_MASK) ==
4650 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4651 /* Request Boot Device */
4652 (ioc->bios_pg2.ReqBootDeviceForm &
4653 MPI2_BIOSPAGE2_FORM_MASK) ==
4654 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4655 /* Alternate Request Boot Device */
4656 (ioc->bios_pg2.ReqAltBootDeviceForm &
4657 MPI2_BIOSPAGE2_FORM_MASK) ==
4658 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
4659 return 0;
4660
4661 return 1;
4662}
4663
4664/**
4665 * _base_unmask_events - turn on notification for this event
4666 * @ioc: per adapter object
4667 * @event: firmware event
4668 *
4669 * The mask is stored in ioc->event_masks.
4670 */
4671static void
4672_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
4673{
4674 u32 desired_event;
4675
4676 if (event >= 128)
4677 return;
4678
4679 desired_event = (1 << (event % 32));
4680
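	/*
	 * ioc->event_masks[] holds four 32-bit mask words covering events
	 * 0-127; clearing the event's bit in the matching word unmasks
	 * (enables notification of) that event.
	 */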
4681 if (event < 32)
4682 ioc->event_masks[0] &= ~desired_event;
4683 else if (event < 64)
4684 ioc->event_masks[1] &= ~desired_event;
4685 else if (event < 96)
4686 ioc->event_masks[2] &= ~desired_event;
4687 else if (event < 128)
4688 ioc->event_masks[3] &= ~desired_event;
4689}
4690
4691/**
4692 * _base_event_notification - send event notification
4693 * @ioc: per adapter object
4694 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4695 *
4696 * Returns 0 for success, non-zero for failure.
4697 */
4698static int
4699_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4700{
4701 Mpi2EventNotificationRequest_t *mpi_request;
4702 unsigned long timeleft;
4703 u16 smid;
4704 int r = 0;
4705 int i;
4706
4707 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4708 __func__));
4709
4710 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4711 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4712 ioc->name, __func__);
4713 return -EAGAIN;
4714 }
4715
4716 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4717 if (!smid) {
4718 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4719 ioc->name, __func__);
4720 return -EAGAIN;
4721 }
4722 ioc->base_cmds.status = MPT3_CMD_PENDING;
4723 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4724 ioc->base_cmds.smid = smid;
4725 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
4726 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
4727 mpi_request->VF_ID = 0; /* TODO */
4728 mpi_request->VP_ID = 0;
4729 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4730 mpi_request->EventMasks[i] =
4731 cpu_to_le32(ioc->event_masks[i]);
4732 init_completion(&ioc->base_cmds.done);
4733 mpt3sas_base_put_smid_default(ioc, smid);
4734 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
4735 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4736 pr_err(MPT3SAS_FMT "%s: timeout\n",
4737 ioc->name, __func__);
4738 _debug_dump_mf(mpi_request,
4739 sizeof(Mpi2EventNotificationRequest_t)/4);
4740 if (ioc->base_cmds.status & MPT3_CMD_RESET)
4741 r = -EFAULT;
4742 else
4743 r = -ETIME;
4744 } else
4745 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
4746 ioc->name, __func__));
4747 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4748 return r;
4749}
4750
4751/**
4752 * mpt3sas_base_validate_event_type - validating event types
4753 * @ioc: per adapter object
4754 * @event_type: firmware event type bitmap requested by the application
4755 *
4756 * This will turn on firmware event notification when an application
4757 * asks for that event. We don't mask events that are already enabled.
4758 */
4759void
4760mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
4761{
4762 int i, j;
4763 u32 event_mask, desired_event;
4764 u8 send_update_to_fw;
4765
4766 for (i = 0, send_update_to_fw = 0; i <
4767 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
4768 event_mask = ~event_type[i];
4769 desired_event = 1;
4770 for (j = 0; j < 32; j++) {
4771 if (!(event_mask & desired_event) &&
4772 (ioc->event_masks[i] & desired_event)) {
4773 ioc->event_masks[i] &= ~desired_event;
4774 send_update_to_fw = 1;
4775 }
4776 desired_event = (desired_event << 1);
4777 }
4778 }
4779
4780 if (!send_update_to_fw)
4781 return;
4782
4783 mutex_lock(&ioc->base_cmds.mutex);
4784 _base_event_notification(ioc, CAN_SLEEP);
4785 mutex_unlock(&ioc->base_cmds.mutex);
4786}
4787
4788/**
4789 * _base_diag_reset - the "big hammer" start of day reset
4790 * @ioc: per adapter object
4791 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4792 *
4793 * Returns 0 for success, non-zero for failure.
4794 */
4795static int
4796_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4797{
4798 u32 host_diagnostic;
4799 u32 ioc_state;
4800 u32 count;
4801 u32 hcb_size;
4802
4803 pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
4804
4805 drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
4806 ioc->name));
4807
4808 count = 0;
4809 do {
4810 /* Write magic sequence to WriteSequence register
4811 * Loop until in diagnostic mode
4812 */
4813 drsprintk(ioc, pr_info(MPT3SAS_FMT
4814 "write magic sequence\n", ioc->name));
4815 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4816 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
4817 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
4818 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
4819 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
4820 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
4821 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4822
4823 /* wait 100 msec */
4824 if (sleep_flag == CAN_SLEEP)
4825 msleep(100);
4826 else
4827 mdelay(100);
4828
4829 if (count++ > 20)
4830 goto out;
4831
4832 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4833 drsprintk(ioc, pr_info(MPT3SAS_FMT
4834 "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
4835 ioc->name, count, host_diagnostic));
4836
4837 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
4838
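	/*
	 * Save the HCB size now; if the adapter comes back in HCB mode the
	 * Host Controlled Boot window is re-enabled further down using
	 * this value.
	 */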
4839 hcb_size = readl(&ioc->chip->HCBSize);
4840
4841 drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
4842 ioc->name));
4843 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
4844 &ioc->chip->HostDiagnostic);
4845
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304846 /*This delay allows the chip PCIe hardware time to finish reset tasks*/
4847 if (sleep_flag == CAN_SLEEP)
4848 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4849 else
4850 mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304851
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304852 /* Approximately 300 second max wait */
4853 for (count = 0; count < (300000000 /
4854 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304855
4856 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4857
4858 if (host_diagnostic == 0xFFFFFFFF)
4859 goto out;
4860 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4861 break;
4862
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304863 /* Wait to pass the second read delay window */
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304864 if (sleep_flag == CAN_SLEEP)
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304865 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4866 / 1000);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304867 else
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304868 mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4869 / 1000);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304870 }
4871
4872 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
4873
4874 drsprintk(ioc, pr_info(MPT3SAS_FMT
4875 "restart the adapter assuming the HCB Address points to good F/W\n",
4876 ioc->name));
4877 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
4878 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
4879 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
4880
4881 drsprintk(ioc, pr_info(MPT3SAS_FMT
4882 "re-enable the HCDW\n", ioc->name));
4883 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
4884 &ioc->chip->HCBSize);
4885 }
4886
4887 drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
4888 ioc->name));
4889 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
4890 &ioc->chip->HostDiagnostic);
4891
4892 drsprintk(ioc, pr_info(MPT3SAS_FMT
4893 "disable writes to the diagnostic register\n", ioc->name));
4894 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4895
4896 drsprintk(ioc, pr_info(MPT3SAS_FMT
4897 "Wait for FW to go to the READY state\n", ioc->name));
4898 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
4899 sleep_flag);
4900 if (ioc_state) {
4901 pr_err(MPT3SAS_FMT
4902 "%s: failed going to ready state (ioc_state=0x%x)\n",
4903 ioc->name, __func__, ioc_state);
4904 goto out;
4905 }
4906
4907 pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
4908 return 0;
4909
4910 out:
4911 pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
4912 return -EFAULT;
4913}
4914
4915/**
4916 * _base_make_ioc_ready - put controller in READY state
4917 * @ioc: per adapter object
4918 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4919 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4920 *
4921 * Returns 0 for success, non-zero for failure.
4922 */
4923static int
4924_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4925 enum reset_type type)
4926{
4927 u32 ioc_state;
4928 int rc;
4929 int count;
4930
4931 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4932 __func__));
4933
4934 if (ioc->pci_error_recovery)
4935 return 0;
4936
4937 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4938 dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4939 ioc->name, __func__, ioc_state));
4940
4941 /* if in RESET state, it should move to READY state shortly */
4942 count = 0;
4943 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
4944 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
4945 MPI2_IOC_STATE_READY) {
4946 if (count++ == 10) {
4947 pr_err(MPT3SAS_FMT
4948 "%s: failed going to ready state (ioc_state=0x%x)\n",
4949 ioc->name, __func__, ioc_state);
4950 return -EFAULT;
4951 }
4952 if (sleep_flag == CAN_SLEEP)
4953 ssleep(1);
4954 else
4955 mdelay(1000);
4956 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4957 }
4958 }
4959
4960 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4961 return 0;
4962
4963 if (ioc_state & MPI2_DOORBELL_USED) {
4964 dhsprintk(ioc, pr_info(MPT3SAS_FMT
4965 "unexpected doorbell active!\n",
4966 ioc->name));
4967 goto issue_diag_reset;
4968 }
4969
4970 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4971 mpt3sas_base_fault_info(ioc, ioc_state &
4972 MPI2_DOORBELL_DATA_MASK);
4973 goto issue_diag_reset;
4974 }
4975
4976 if (type == FORCE_BIG_HAMMER)
4977 goto issue_diag_reset;
4978
4979 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4980 if (!(_base_send_ioc_reset(ioc,
4981 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4982 return 0;
4983 }
4984
4985 issue_diag_reset:
4986 rc = _base_diag_reset(ioc, CAN_SLEEP);
4987 return rc;
4988}
4989
4990/**
4991 * _base_make_ioc_operational - put controller in OPERATIONAL state
4992 * @ioc: per adapter object
4993 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4994 *
4995 * Returns 0 for success, non-zero for failure.
4996 */
4997static int
4998_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4999{
5000 int r, i;
5001 unsigned long flags;
5002 u32 reply_address;
5003 u16 smid;
5004 struct _tr_list *delayed_tr, *delayed_tr_next;
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05305005 struct _sc_list *delayed_sc, *delayed_sc_next;
5006 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305007 u8 hide_flag;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305008 struct adapter_reply_queue *reply_q;
5009 long reply_post_free;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05305010 u32 reply_post_free_sz, index = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305011
5012 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5013 __func__));
5014
5015 /* clean the delayed target reset list */
5016 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
5017 &ioc->delayed_tr_list, list) {
5018 list_del(&delayed_tr->list);
5019 kfree(delayed_tr);
5020 }
5021
5022
5023 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
5024 &ioc->delayed_tr_volume_list, list) {
5025 list_del(&delayed_tr->list);
5026 kfree(delayed_tr);
5027 }
5028
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05305029 list_for_each_entry_safe(delayed_sc, delayed_sc_next,
5030 &ioc->delayed_sc_list, list) {
5031 list_del(&delayed_sc->list);
5032 kfree(delayed_sc);
5033 }
5034
5035 list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
5036 &ioc->delayed_event_ack_list, list) {
5037 list_del(&delayed_event_ack->list);
5038 kfree(delayed_event_ack);
5039 }
5040
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305041 /* initialize the scsi lookup free list */
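	/* SMIDs handed out for SCSI IO are 1-based, so lookup entry i
	 * tracks smid i + 1; cb_idx == 0xFF marks the tracker as free.
	 */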
5042 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5043 INIT_LIST_HEAD(&ioc->free_list);
5044 smid = 1;
5045 for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
5046 INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
5047 ioc->scsi_lookup[i].cb_idx = 0xFF;
5048 ioc->scsi_lookup[i].smid = smid;
5049 ioc->scsi_lookup[i].scmd = NULL;
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305050 ioc->scsi_lookup[i].direct_io = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305051 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
5052 &ioc->free_list);
5053 }
5054
5055 /* hi-priority queue */
5056 INIT_LIST_HEAD(&ioc->hpr_free_list);
5057 smid = ioc->hi_priority_smid;
5058 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
5059 ioc->hpr_lookup[i].cb_idx = 0xFF;
5060 ioc->hpr_lookup[i].smid = smid;
5061 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
5062 &ioc->hpr_free_list);
5063 }
5064
5065 /* internal queue */
5066 INIT_LIST_HEAD(&ioc->internal_free_list);
5067 smid = ioc->internal_smid;
5068 for (i = 0; i < ioc->internal_depth; i++, smid++) {
5069 ioc->internal_lookup[i].cb_idx = 0xFF;
5070 ioc->internal_lookup[i].smid = smid;
5071 list_add_tail(&ioc->internal_lookup[i].tracker_list,
5072 &ioc->internal_free_list);
5073 }
5074
5075 /* chain pool */
5076 INIT_LIST_HEAD(&ioc->free_chain_list);
5077 for (i = 0; i < ioc->chain_depth; i++)
5078 list_add_tail(&ioc->chain_lookup[i].tracker_list,
5079 &ioc->free_chain_list);
5080
5081 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5082
5083 /* initialize Reply Free Queue */
5084 for (i = 0, reply_address = (u32)ioc->reply_dma ;
5085 i < ioc->reply_free_queue_depth ; i++, reply_address +=
5086 ioc->reply_sz)
5087 ioc->reply_free[i] = cpu_to_le32(reply_address);
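	/* each entry is the DMA address of one reply frame (spaced reply_sz
	 * apart, starting at reply_dma); the matching ReplyFreeHostIndex
	 * write happens below, only after IOC init succeeds
	 */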
5088
5089 /* initialize reply queues */
5090 if (ioc->is_driver_loading)
5091 _base_assign_reply_queues(ioc);
5092
5093 /* initialize Reply Post Free Queue */
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305094 reply_post_free_sz = ioc->reply_post_queue_depth *
5095 sizeof(Mpi2DefaultReplyDescriptor_t);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05305096 reply_post_free = (long)ioc->reply_post[index].reply_post_free;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305097 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5098 reply_q->reply_post_host_index = 0;
5099 reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
5100 reply_post_free;
5101 for (i = 0; i < ioc->reply_post_queue_depth; i++)
5102 reply_q->reply_post_free[i].Words =
5103 cpu_to_le64(ULLONG_MAX);
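		/* initialize every descriptor slot to all ones so that a
		 * slot the IOC has not yet written is never mistaken for a
		 * valid reply descriptor
		 */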
5104 if (!_base_is_controller_msix_enabled(ioc))
5105 goto skip_init_reply_post_free_queue;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05305106 /*
5107 * If RDPQ is enabled, switch to the next allocation.
5108 * Otherwise advance within the contiguous region.
5109 */
5110 if (ioc->rdpq_array_enable)
5111 reply_post_free = (long)
5112 ioc->reply_post[++index].reply_post_free;
5113 else
5114 reply_post_free += reply_post_free_sz;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305115 }
5116 skip_init_reply_post_free_queue:
5117
5118 r = _base_send_ioc_init(ioc, sleep_flag);
5119 if (r)
5120 return r;
5121
5122 /* initialize reply free host index */
5123 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
5124 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
5125
5126 /* initialize reply post host index */
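	/* With the combined reply queue scheme (msix96_vector), every group
	 * of 8 MSI-X vectors has its own supplemental ReplyPostHostIndex
	 * register: replyPostRegisterIndex[msix_index / 8] picks the
	 * register and the low three bits of the vector go into the
	 * MSIX_INDEX field.  Otherwise all reply queues share the single
	 * ReplyPostHostIndex register in the system interface.
	 */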
5127 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
Sreekanth Reddyfb77bb52015-06-30 12:24:47 +05305128 if (ioc->msix96_vector)
5129 writel((reply_q->msix_index & 7)<<
5130 MPI2_RPHI_MSIX_INDEX_SHIFT,
5131 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
5132 else
5133 writel(reply_q->msix_index <<
5134 MPI2_RPHI_MSIX_INDEX_SHIFT,
5135 &ioc->chip->ReplyPostHostIndex);
5136
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305137 if (!_base_is_controller_msix_enabled(ioc))
5138 goto skip_init_reply_post_host_index;
5139 }
5140
5141 skip_init_reply_post_host_index:
5142
5143 _base_unmask_interrupts(ioc);
5144 r = _base_event_notification(ioc, sleep_flag);
5145 if (r)
5146 return r;
5147
5148 if (sleep_flag == CAN_SLEEP)
5149 _base_static_config_pages(ioc);
5150
5151
5152 if (ioc->is_driver_loading) {
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305153
5154 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
5155 == 0x80) {
5156 hide_flag = (u8) (
5157 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
5158 MFG_PAGE10_HIDE_SSDS_MASK);
5159 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
5160 ioc->mfg_pg10_hide_flag = hide_flag;
5161 }
5162
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305163 ioc->wait_for_discovery_to_complete =
5164 _base_determine_wait_on_discovery(ioc);
5165
5166 return r; /* scan_start and scan_finished support */
5167 }
5168
5169 r = _base_send_port_enable(ioc, sleep_flag);
5170 if (r)
5171 return r;
5172
5173 return r;
5174}
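/*
 * Bring-up order note: this routine assumes the IOC is already in the
 * READY state.  Callers in this file (mpt3sas_base_attach() below and the
 * hard reset handler) run _base_make_ioc_ready() first and only then this
 * routine, e.g. (illustrative sketch of the existing call order):
 *
 *	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
 *	if (!r)
 *		r = _base_make_ioc_operational(ioc, CAN_SLEEP);
 */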
5175
5176/**
5177 * mpt3sas_base_free_resources - free controller resources
5178 * @ioc: per adapter object
5179 *
5180 * Return nothing.
5181 */
5182void
5183mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
5184{
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305185 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5186 __func__));
5187
Sreekanth Reddy08c4d552015-11-11 17:30:33 +05305188	/* synchronize freeing of resources with the pci_access_mutex lock */
5189 mutex_lock(&ioc->pci_access_mutex);
Joe Lawrencecf9bd21a2013-08-08 16:45:39 -04005190 if (ioc->chip_phys && ioc->chip) {
5191 _base_mask_interrupts(ioc);
5192 ioc->shost_recovery = 1;
5193 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
5194 ioc->shost_recovery = 0;
5195 }
5196
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05305197 mpt3sas_base_unmap_resources(ioc);
Sreekanth Reddy08c4d552015-11-11 17:30:33 +05305198 mutex_unlock(&ioc->pci_access_mutex);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305199 return;
5200}
5201
5202/**
5203 * mpt3sas_base_attach - attach controller instance
5204 * @ioc: per adapter object
5205 *
5206 * Returns 0 for success, non-zero for failure.
5207 */
5208int
5209mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5210{
5211 int r, i;
5212 int cpu_id, last_cpu_id = 0;
5213
5214 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5215 __func__));
5216
5217 /* setup cpu_msix_table */
5218 ioc->cpu_count = num_online_cpus();
5219 for_each_online_cpu(cpu_id)
5220 last_cpu_id = cpu_id;
5221 ioc->cpu_msix_table_sz = last_cpu_id + 1;
5222 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
5223 ioc->reply_queue_count = 1;
5224 if (!ioc->cpu_msix_table) {
5225 dfailprintk(ioc, pr_info(MPT3SAS_FMT
5226 "allocation for cpu_msix_table failed!!!\n",
5227 ioc->name));
5228 r = -ENOMEM;
5229 goto out_free_resources;
5230 }
5231
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305232 if (ioc->is_warpdrive) {
5233 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
5234 sizeof(resource_size_t *), GFP_KERNEL);
5235 if (!ioc->reply_post_host_index) {
5236 dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
5237 "for cpu_msix_table failed!!!\n", ioc->name));
5238 r = -ENOMEM;
5239 goto out_free_resources;
5240 }
5241 }
5242
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05305243 ioc->rdpq_array_enable_assigned = 0;
5244 ioc->dma_mask = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305245 r = mpt3sas_base_map_resources(ioc);
5246 if (r)
5247 goto out_free_resources;
5248
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305249 if (ioc->is_warpdrive) {
5250 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
5251 &ioc->chip->ReplyPostHostIndex;
5252
5253 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
5254 ioc->reply_post_host_index[i] =
5255 (resource_size_t __iomem *)
5256 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
5257 * 4)));
5258 }
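	/* For warpdrive controllers the per-MSI-X ReplyPostHostIndex
	 * registers mapped above sit at fixed offsets from the Doorbell
	 * register (0x4000 plus 4 bytes per additional vector); entry 0 is
	 * the standard ReplyPostHostIndex register itself.
	 */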
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305259
5260 pci_set_drvdata(ioc->pdev, ioc->shost);
5261 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
5262 if (r)
5263 goto out_free_resources;
5264
Sreekanth Reddy471ef9d2015-11-11 17:30:24 +05305265 switch (ioc->hba_mpi_version_belonged) {
5266 case MPI2_VERSION:
5267 ioc->build_sg_scmd = &_base_build_sg_scmd;
5268 ioc->build_sg = &_base_build_sg;
5269 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
5270 break;
5271 case MPI25_VERSION:
Suganath prabu Subramanib130b0d2016-01-28 12:06:58 +05305272 case MPI26_VERSION:
Sreekanth Reddy471ef9d2015-11-11 17:30:24 +05305273 /*
5274		 * In SAS 3.0 and later (MPI v2.5/v2.6),
5275		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
5276		 * Target Status - all require the IEEE formatted scatter gather
5277 * elements.
5278 */
5279 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
5280 ioc->build_sg = &_base_build_sg_ieee;
5281 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
5282 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
5283 break;
5284 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305285
5286 /*
5287	 * These function pointers are for other requests that don't
5288	 * require IEEE scatter gather elements.
5289 *
5290 * For example Configuration Pages and SAS IOUNIT Control don't.
5291 */
5292 ioc->build_sg_mpi = &_base_build_sg;
5293 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
5294
5295 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
5296 if (r)
5297 goto out_free_resources;
5298
5299 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
5300 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
5301 if (!ioc->pfacts) {
5302 r = -ENOMEM;
5303 goto out_free_resources;
5304 }
5305
5306 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
5307 r = _base_get_port_facts(ioc, i, CAN_SLEEP);
5308 if (r)
5309 goto out_free_resources;
5310 }
5311
5312 r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
5313 if (r)
5314 goto out_free_resources;
5315
5316 init_waitqueue_head(&ioc->reset_wq);
5317
5318 /* allocate memory pd handle bitmask list */
5319 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
5320 if (ioc->facts.MaxDevHandle % 8)
5321 ioc->pd_handles_sz++;
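	/* one bit per possible device handle, rounded up to whole bytes:
	 * e.g. MaxDevHandle == 1024 gives 128 bytes, 1025 gives 129
	 */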
5322 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
5323 GFP_KERNEL);
5324 if (!ioc->pd_handles) {
5325 r = -ENOMEM;
5326 goto out_free_resources;
5327 }
5328 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
5329 GFP_KERNEL);
5330 if (!ioc->blocking_handles) {
5331 r = -ENOMEM;
5332 goto out_free_resources;
5333 }
5334
5335 ioc->fwfault_debug = mpt3sas_fwfault_debug;
5336
5337 /* base internal command bits */
5338 mutex_init(&ioc->base_cmds.mutex);
5339 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5340 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5341
5342 /* port_enable command bits */
5343 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5344 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5345
5346 /* transport internal command bits */
5347 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5348 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
5349 mutex_init(&ioc->transport_cmds.mutex);
5350
5351 /* scsih internal command bits */
5352 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5353 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
5354 mutex_init(&ioc->scsih_cmds.mutex);
5355
5356 /* task management internal command bits */
5357 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5358 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
5359 mutex_init(&ioc->tm_cmds.mutex);
5360
5361 /* config page internal command bits */
5362 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5363 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
5364 mutex_init(&ioc->config_cmds.mutex);
5365
5366 /* ctl module internal command bits */
5367 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5368 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
5369 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
5370 mutex_init(&ioc->ctl_cmds.mutex);
5371
5372 if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
5373 !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
5374 !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
5375 !ioc->ctl_cmds.sense) {
5376 r = -ENOMEM;
5377 goto out_free_resources;
5378 }
5379
5380 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5381 ioc->event_masks[i] = -1;
5382
5383 /* here we enable the events we care about */
5384 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
5385 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
5386 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
5387 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5388 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
5389 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
5390 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
5391 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
5392 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
5393 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
Sreekanth Reddy2d8ce8c2015-01-12 11:38:56 +05305394 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305395
5396 r = _base_make_ioc_operational(ioc, CAN_SLEEP);
5397 if (r)
5398 goto out_free_resources;
5399
Sreekanth Reddy16e179b2015-11-11 17:30:27 +05305400 ioc->non_operational_loop = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305401 return 0;
5402
5403 out_free_resources:
5404
5405 ioc->remove_host = 1;
5406
5407 mpt3sas_base_free_resources(ioc);
5408 _base_release_memory_pools(ioc);
5409 pci_set_drvdata(ioc->pdev, NULL);
5410 kfree(ioc->cpu_msix_table);
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305411 if (ioc->is_warpdrive)
5412 kfree(ioc->reply_post_host_index);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305413 kfree(ioc->pd_handles);
5414 kfree(ioc->blocking_handles);
5415 kfree(ioc->tm_cmds.reply);
5416 kfree(ioc->transport_cmds.reply);
5417 kfree(ioc->scsih_cmds.reply);
5418 kfree(ioc->config_cmds.reply);
5419 kfree(ioc->base_cmds.reply);
5420 kfree(ioc->port_enable_cmds.reply);
5421 kfree(ioc->ctl_cmds.reply);
5422 kfree(ioc->ctl_cmds.sense);
5423 kfree(ioc->pfacts);
5424 ioc->ctl_cmds.reply = NULL;
5425 ioc->base_cmds.reply = NULL;
5426 ioc->tm_cmds.reply = NULL;
5427 ioc->scsih_cmds.reply = NULL;
5428 ioc->transport_cmds.reply = NULL;
5429 ioc->config_cmds.reply = NULL;
5430 ioc->pfacts = NULL;
5431 return r;
5432}
5433
5434
5435/**
5436 * mpt3sas_base_detach - remove controller instance
5437 * @ioc: per adapter object
5438 *
5439 * Return nothing.
5440 */
5441void
5442mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
5443{
5444 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5445 __func__));
5446
5447 mpt3sas_base_stop_watchdog(ioc);
5448 mpt3sas_base_free_resources(ioc);
5449 _base_release_memory_pools(ioc);
5450 pci_set_drvdata(ioc->pdev, NULL);
5451 kfree(ioc->cpu_msix_table);
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305452 if (ioc->is_warpdrive)
5453 kfree(ioc->reply_post_host_index);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305454 kfree(ioc->pd_handles);
5455 kfree(ioc->blocking_handles);
5456 kfree(ioc->pfacts);
5457 kfree(ioc->ctl_cmds.reply);
5458 kfree(ioc->ctl_cmds.sense);
5459 kfree(ioc->base_cmds.reply);
5460 kfree(ioc->port_enable_cmds.reply);
5461 kfree(ioc->tm_cmds.reply);
5462 kfree(ioc->transport_cmds.reply);
5463 kfree(ioc->scsih_cmds.reply);
5464 kfree(ioc->config_cmds.reply);
5465}
5466
5467/**
5468 * _base_reset_handler - reset callback handler (for base)
5469 * @ioc: per adapter object
5470 * @reset_phase: phase
5471 *
5472 * The handler for doing any required cleanup or initialization.
5473 *
5474 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
5475 * MPT3_IOC_DONE_RESET
5476 *
5477 * Return nothing.
5478 */
5479static void
5480_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
5481{
5482 mpt3sas_scsih_reset_handler(ioc, reset_phase);
5483 mpt3sas_ctl_reset_handler(ioc, reset_phase);
5484 switch (reset_phase) {
5485 case MPT3_IOC_PRE_RESET:
5486 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5487 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
5488 break;
5489 case MPT3_IOC_AFTER_RESET:
5490 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5491 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
5492 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
5493 ioc->transport_cmds.status |= MPT3_CMD_RESET;
5494 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
5495 complete(&ioc->transport_cmds.done);
5496 }
5497 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
5498 ioc->base_cmds.status |= MPT3_CMD_RESET;
5499 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
5500 complete(&ioc->base_cmds.done);
5501 }
5502 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5503 ioc->port_enable_failed = 1;
5504 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
5505 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
5506 if (ioc->is_driver_loading) {
5507 ioc->start_scan_failed =
5508 MPI2_IOCSTATUS_INTERNAL_ERROR;
5509 ioc->start_scan = 0;
5510 ioc->port_enable_cmds.status =
5511 MPT3_CMD_NOT_USED;
5512 } else
5513 complete(&ioc->port_enable_cmds.done);
5514 }
5515 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
5516 ioc->config_cmds.status |= MPT3_CMD_RESET;
5517 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
5518 ioc->config_cmds.smid = USHRT_MAX;
5519 complete(&ioc->config_cmds.done);
5520 }
5521 break;
5522 case MPT3_IOC_DONE_RESET:
5523 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5524 "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
5525 break;
5526 }
5527}
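/*
 * Phase summary for the handler above: MPT3_IOC_PRE_RESET runs before the
 * IOC is reset, MPT3_IOC_AFTER_RESET runs once the IOC is back in the
 * READY state (any internal command still pending has its smid freed, is
 * flagged MPT3_CMD_RESET and is completed so its waiter can return), and
 * MPT3_IOC_DONE_RESET runs after the IOC is OPERATIONAL again (see
 * mpt3sas_base_hard_reset_handler() below for the exact ordering).
 */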
5528
5529/**
5530 * _wait_for_commands_to_complete - wait for pending commands to complete
5531 * @ioc: Pointer to MPT3SAS_ADAPTER structure
5532 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5533 *
5534 * This function waits (up to 10 seconds) for all pending commands to
5535 * complete prior to putting the controller into reset.
5536 */
5537static void
5538_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5539{
5540 u32 ioc_state;
5541 unsigned long flags;
5542 u16 i;
5543
5544 ioc->pending_io_count = 0;
5545 if (sleep_flag != CAN_SLEEP)
5546 return;
5547
5548 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5549 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
5550 return;
5551
5552 /* pending command count */
5553 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5554 for (i = 0; i < ioc->scsiio_depth; i++)
5555 if (ioc->scsi_lookup[i].cb_idx != 0xFF)
5556 ioc->pending_io_count++;
5557 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5558
5559 if (!ioc->pending_io_count)
5560 return;
5561
5562 /* wait for pending commands to complete */
5563 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
5564}
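/*
 * The completion path is expected to drop pending_io_count and wake
 * reset_wq as commands finish; if the count does not reach zero within the
 * 10 second wait_event_timeout() above, the reset simply proceeds with I/O
 * still outstanding.
 */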
5565
5566/**
5567 * mpt3sas_base_hard_reset_handler - reset controller
5568 * @ioc: Pointer to MPT3SAS_ADAPTER structure
5569 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5570 * @type: FORCE_BIG_HAMMER or SOFT_RESET
5571 *
5572 * Returns 0 for success, non-zero for failure.
5573 */
5574int
5575mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5576 enum reset_type type)
5577{
5578 int r;
5579 unsigned long flags;
5580 u32 ioc_state;
5581 u8 is_fault = 0, is_trigger = 0;
5582
5583 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
5584 __func__));
5585
5586 if (ioc->pci_error_recovery) {
5587 pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
5588 ioc->name, __func__);
5589 r = 0;
5590 goto out_unlocked;
5591 }
5592
5593 if (mpt3sas_fwfault_debug)
5594 mpt3sas_halt_firmware(ioc);
5595
5596 /* TODO - What we really should be doing is pulling
5597	 * out all the code associated with NO_SLEEP; it's never used.
5598	 * That is legacy code ported over from the mpt fusion driver.
5599	 * I will leave this BUG_ON here for now until it's been resolved.
5600 */
5601 BUG_ON(sleep_flag == NO_SLEEP);
5602
5603 /* wait for an active reset in progress to complete */
5604 if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
5605 do {
5606 ssleep(1);
5607 } while (ioc->shost_recovery == 1);
5608 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5609 __func__));
5610 return ioc->ioc_reset_in_progress_status;
5611 }
5612
5613 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5614 ioc->shost_recovery = 1;
5615 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5616
5617 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5618 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
5619 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5620 MPT3_DIAG_BUFFER_IS_RELEASED))) {
5621 is_trigger = 1;
5622 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5623 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
5624 is_fault = 1;
5625 }
5626 _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
5627 _wait_for_commands_to_complete(ioc, sleep_flag);
5628 _base_mask_interrupts(ioc);
5629 r = _base_make_ioc_ready(ioc, sleep_flag, type);
5630 if (r)
5631 goto out;
5632 _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
5633
5634 /* If this hard reset is called while port enable is active, then
5635 * there is no reason to call make_ioc_operational
5636 */
5637 if (ioc->is_driver_loading && ioc->port_enable_failed) {
5638 ioc->remove_host = 1;
5639 r = -EFAULT;
5640 goto out;
5641 }
5642 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
5643 if (r)
5644 goto out;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05305645
5646 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
5647 panic("%s: Issue occurred with flashing controller firmware."
5648 "Please reboot the system and ensure that the correct"
5649 " firmware version is running\n", ioc->name);
5650
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305651 r = _base_make_ioc_operational(ioc, sleep_flag);
5652 if (!r)
5653 _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
5654
5655 out:
5656 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
5657 ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
5658
5659 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5660 ioc->ioc_reset_in_progress_status = r;
5661 ioc->shost_recovery = 0;
5662 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5663 ioc->ioc_reset_count++;
5664 mutex_unlock(&ioc->reset_in_progress_mutex);
5665
5666 out_unlocked:
5667 if ((r == 0) && is_trigger) {
5668 if (is_fault)
5669 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
5670 else
5671 mpt3sas_trigger_master(ioc,
5672 MASTER_TRIGGER_ADAPTER_RESET);
5673 }
5674 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5675 __func__));
5676 return r;
5677}
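/*
 * Hard reset flow implemented above, for reference: serialize on
 * reset_in_progress_mutex, set shost_recovery, run the PRE_RESET phase,
 * wait for outstanding commands, mask interrupts, bring the IOC back to
 * READY, run AFTER_RESET, re-read IOC facts, make the IOC OPERATIONAL
 * again and finish with DONE_RESET.  A minimal caller sketch (illustrative
 * only, not an additional call site):
 *
 *	if (mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER))
 *		pr_err(MPT3SAS_FMT "host reset failed\n", ioc->name);
 */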