1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6 * Copyright (C) 2012-2014 LSI Corporation
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43 * USA.
44 */
45
46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/kdev_t.h>
54#include <linux/blkdev.h>
55#include <linux/delay.h>
56#include <linux/interrupt.h>
57#include <linux/dma-mapping.h>
58#include <linux/io.h>
59#include <linux/time.h>
60#include <linux/ktime.h>
61#include <linux/kthread.h>
62#include <linux/aer.h>
63
64
65#include "mpt3sas_base.h"
66
67static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
68
69
70#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
71
72 /* maximum controller queue depth */
73#define MAX_HBA_QUEUE_DEPTH 30000
74#define MAX_CHAIN_DEPTH 100000
75static int max_queue_depth = -1;
76module_param(max_queue_depth, int, 0);
77MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
78
79static int max_sgl_entries = -1;
80module_param(max_sgl_entries, int, 0);
81MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
82
83static int msix_disable = -1;
84module_param(msix_disable, int, 0);
85MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
86
87static int smp_affinity_enable = 1;
88 module_param(smp_affinity_enable, int, S_IRUGO);
89 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
90
91static int max_msix_vectors = -1;
92module_param(max_msix_vectors, int, 0);
93MODULE_PARM_DESC(max_msix_vectors,
94 " max msix vectors");
95
96static int mpt3sas_fwfault_debug;
97MODULE_PARM_DESC(mpt3sas_fwfault_debug,
98 " enable detection of firmware fault and halt firmware - (default=0)");
99
100static int
101_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
102
103/**
104 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
105 *
106 */
107static int
108_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
109{
110 int ret = param_set_int(val, kp);
111 struct MPT3SAS_ADAPTER *ioc;
112
113 if (ret)
114 return ret;
115
116 /* global ioc spinlock to protect controller list on list operations */
117 pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
118 spin_lock(&gioc_lock);
119 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
120 ioc->fwfault_debug = mpt3sas_fwfault_debug;
121 spin_unlock(&gioc_lock);
122 return 0;
123}
124module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
125 param_get_int, &mpt3sas_fwfault_debug, 0644);
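/*
 * Note: because the parameter is registered with mode 0644 above, the
 * firmware-fault debug flag can be toggled at runtime. A typical
 * (illustrative, not taken from this file) invocation writes to the
 * standard module parameter file under sysfs:
 *
 *	echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 *
 * which ends up in _scsih_set_fwfault_debug() and propagates the new
 * value to every controller on mpt3sas_ioc_list.
 */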
126
127/**
128 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
129 * @arg: input argument, used to derive ioc
130 *
131 * Return 0 if controller is removed from pci subsystem.
132 * Return -1 for other case.
133 */
134static int mpt3sas_remove_dead_ioc_func(void *arg)
135{
136 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
137 struct pci_dev *pdev;
138
139 if (ioc == NULL)
140 return -1;
141
142 pdev = ioc->pdev;
143 if (pdev == NULL)
144 return -1;
145 pci_stop_and_remove_bus_device_locked(pdev);
146 return 0;
147}
148
149/**
150 * _base_fault_reset_work - workq handling ioc fault conditions
151 * @work: input argument, used to derive ioc
152 * Context: sleep.
153 *
154 * Return nothing.
155 */
156static void
157_base_fault_reset_work(struct work_struct *work)
158{
159 struct MPT3SAS_ADAPTER *ioc =
160 container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
161 unsigned long flags;
162 u32 doorbell;
163 int rc;
164 struct task_struct *p;
165
166
167 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
168 if (ioc->shost_recovery || ioc->pci_error_recovery)
169 goto rearm_timer;
170 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
171
172 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
173 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
174 pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
175 ioc->name);
176
177 /* It may be possible that EEH recovery can resolve some of
178 * the pci bus failure issues rather than removing the dead
179 * ioc function by considering the controller to be in a
180 * non-operational state. So here priority is given to EEH
181 * recovery. If it does not resolve the issue, the mpt3sas
182 * driver will consider the controller non-operational and
183 * remove the dead ioc function.
184 */
185 if (ioc->non_operational_loop++ < 5) {
186 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
187 flags);
188 goto rearm_timer;
189 }
190
191 /*
192 * Call _scsih_flush_pending_cmds callback so that we flush all
193 * pending commands back to OS. This call is required to avoid
194 * deadlock at block layer. Dead IOC will fail to do diag reset,
195 * and this call is safe since dead ioc will never return any
196 * command back from HW.
197 */
198 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
199 /*
200 * Set remove_host flag early since kernel thread will
201 * take some time to execute.
202 */
203 ioc->remove_host = 1;
204 /* Remove the Dead Host */
205 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
206 "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
207 if (IS_ERR(p))
208 pr_err(MPT3SAS_FMT
209 "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
210 ioc->name, __func__);
211 else
212 pr_err(MPT3SAS_FMT
213 "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
214 ioc->name, __func__);
215 return; /* don't rearm timer */
216 }
217
218 ioc->non_operational_loop = 0;
219
220 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
221 rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
222 FORCE_BIG_HAMMER);
223 pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
224 __func__, (rc == 0) ? "success" : "failed");
225 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
226 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
227 mpt3sas_base_fault_info(ioc, doorbell &
228 MPI2_DOORBELL_DATA_MASK);
229 if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
230 MPI2_IOC_STATE_OPERATIONAL)
231 return; /* don't rearm timer */
232 }
233
234 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
235 rearm_timer:
236 if (ioc->fault_reset_work_q)
237 queue_delayed_work(ioc->fault_reset_work_q,
238 &ioc->fault_reset_work,
239 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
240 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
241}
242
243/**
244 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
245 * @ioc: per adapter object
246 * Context: sleep.
247 *
248 * Return nothing.
249 */
250void
251mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
252{
253 unsigned long flags;
254
255 if (ioc->fault_reset_work_q)
256 return;
257
258 /* initialize fault polling */
259
260 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
261 snprintf(ioc->fault_reset_work_q_name,
262 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
263 ioc->driver_name, ioc->id);
264 ioc->fault_reset_work_q =
265 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
266 if (!ioc->fault_reset_work_q) {
267 pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
268 ioc->name, __func__, __LINE__);
269 return;
270 }
271 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
272 if (ioc->fault_reset_work_q)
273 queue_delayed_work(ioc->fault_reset_work_q,
274 &ioc->fault_reset_work,
275 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
276 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
277}
278
279/**
280 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
281 * @ioc: per adapter object
282 * Context: sleep.
283 *
284 * Return nothing.
285 */
286void
287mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
288{
289 unsigned long flags;
290 struct workqueue_struct *wq;
291
292 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
293 wq = ioc->fault_reset_work_q;
294 ioc->fault_reset_work_q = NULL;
295 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
296 if (wq) {
297 if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
298 flush_workqueue(wq);
299 destroy_workqueue(wq);
300 }
301}
302
303/**
304 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
305 * @ioc: per adapter object
306 * @fault_code: fault code
307 *
308 * Return nothing.
309 */
310void
311mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
312{
313 pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
314 ioc->name, fault_code);
315}
316
317/**
318 * mpt3sas_halt_firmware - halt the mpt controller firmware
319 * @ioc: per adapter object
320 *
321 * For debugging timeout related issues. Writing 0xC0FFEE00
322 * to the doorbell register will halt controller firmware. With
323 * both the driver and the firmware stopped, the end user can
324 * obtain a ring buffer from the controller UART.
325 */
326void
327mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
328{
329 u32 doorbell;
330
331 if (!ioc->fwfault_debug)
332 return;
333
334 dump_stack();
335
336 doorbell = readl(&ioc->chip->Doorbell);
337 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
338 mpt3sas_base_fault_info(ioc , doorbell);
339 else {
340 writel(0xC0FFEE00, &ioc->chip->Doorbell);
341 pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
342 ioc->name);
343 }
344
345 if (ioc->fwfault_debug == 2)
346 for (;;)
347 ;
348 else
349 panic("panic in %s\n", __func__);
350}
351
352/**
353 * _base_sas_ioc_info - verbose translation of the ioc status
354 * @ioc: per adapter object
355 * @mpi_reply: reply mf payload returned from firmware
356 * @request_hdr: request mf
357 *
358 * Return nothing.
359 */
360static void
361_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
362 MPI2RequestHeader_t *request_hdr)
363{
364 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
365 MPI2_IOCSTATUS_MASK;
366 char *desc = NULL;
367 u16 frame_sz;
368 char *func_str = NULL;
369
370 /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
371 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
372 request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
373 request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
374 return;
375
376 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
377 return;
378
379 switch (ioc_status) {
380
381/****************************************************************************
382* Common IOCStatus values for all replies
383****************************************************************************/
384
385 case MPI2_IOCSTATUS_INVALID_FUNCTION:
386 desc = "invalid function";
387 break;
388 case MPI2_IOCSTATUS_BUSY:
389 desc = "busy";
390 break;
391 case MPI2_IOCSTATUS_INVALID_SGL:
392 desc = "invalid sgl";
393 break;
394 case MPI2_IOCSTATUS_INTERNAL_ERROR:
395 desc = "internal error";
396 break;
397 case MPI2_IOCSTATUS_INVALID_VPID:
398 desc = "invalid vpid";
399 break;
400 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
401 desc = "insufficient resources";
402 break;
403 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
404 desc = "insufficient power";
405 break;
406 case MPI2_IOCSTATUS_INVALID_FIELD:
407 desc = "invalid field";
408 break;
409 case MPI2_IOCSTATUS_INVALID_STATE:
410 desc = "invalid state";
411 break;
412 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
413 desc = "op state not supported";
414 break;
415
416/****************************************************************************
417* Config IOCStatus values
418****************************************************************************/
419
420 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
421 desc = "config invalid action";
422 break;
423 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
424 desc = "config invalid type";
425 break;
426 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
427 desc = "config invalid page";
428 break;
429 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
430 desc = "config invalid data";
431 break;
432 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
433 desc = "config no defaults";
434 break;
435 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
436 desc = "config cant commit";
437 break;
438
439/****************************************************************************
440* SCSI IO Reply
441****************************************************************************/
442
443 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
444 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
445 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
446 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
447 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
448 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
449 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
450 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
451 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
452 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
453 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
454 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
455 break;
456
457/****************************************************************************
458* For use by SCSI Initiator and SCSI Target end-to-end data protection
459****************************************************************************/
460
461 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
462 desc = "eedp guard error";
463 break;
464 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
465 desc = "eedp ref tag error";
466 break;
467 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
468 desc = "eedp app tag error";
469 break;
470
471/****************************************************************************
472* SCSI Target values
473****************************************************************************/
474
475 case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
476 desc = "target invalid io index";
477 break;
478 case MPI2_IOCSTATUS_TARGET_ABORTED:
479 desc = "target aborted";
480 break;
481 case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
482 desc = "target no conn retryable";
483 break;
484 case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
485 desc = "target no connection";
486 break;
487 case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
488 desc = "target xfer count mismatch";
489 break;
490 case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
491 desc = "target data offset error";
492 break;
493 case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
494 desc = "target too much write data";
495 break;
496 case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
497 desc = "target iu too short";
498 break;
499 case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
500 desc = "target ack nak timeout";
501 break;
502 case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
503 desc = "target nak received";
504 break;
505
506/****************************************************************************
507* Serial Attached SCSI values
508****************************************************************************/
509
510 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
511 desc = "smp request failed";
512 break;
513 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
514 desc = "smp data overrun";
515 break;
516
517/****************************************************************************
518* Diagnostic Buffer Post / Diagnostic Release values
519****************************************************************************/
520
521 case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
522 desc = "diagnostic released";
523 break;
524 default:
525 break;
526 }
527
528 if (!desc)
529 return;
530
531 switch (request_hdr->Function) {
532 case MPI2_FUNCTION_CONFIG:
533 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
534 func_str = "config_page";
535 break;
536 case MPI2_FUNCTION_SCSI_TASK_MGMT:
537 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
538 func_str = "task_mgmt";
539 break;
540 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
541 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
542 func_str = "sas_iounit_ctl";
543 break;
544 case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
545 frame_sz = sizeof(Mpi2SepRequest_t);
546 func_str = "enclosure";
547 break;
548 case MPI2_FUNCTION_IOC_INIT:
549 frame_sz = sizeof(Mpi2IOCInitRequest_t);
550 func_str = "ioc_init";
551 break;
552 case MPI2_FUNCTION_PORT_ENABLE:
553 frame_sz = sizeof(Mpi2PortEnableRequest_t);
554 func_str = "port_enable";
555 break;
556 case MPI2_FUNCTION_SMP_PASSTHROUGH:
557 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
558 func_str = "smp_passthru";
559 break;
560 default:
561 frame_sz = 32;
562 func_str = "unknown";
563 break;
564 }
565
566 pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
567 ioc->name, desc, ioc_status, request_hdr, func_str);
568
569 _debug_dump_mf(request_hdr, frame_sz/4);
570}
571
572/**
573 * _base_display_event_data - verbose translation of firmware async events
574 * @ioc: per adapter object
575 * @mpi_reply: reply mf payload returned from firmware
576 *
577 * Return nothing.
578 */
579static void
580_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
581 Mpi2EventNotificationReply_t *mpi_reply)
582{
583 char *desc = NULL;
584 u16 event;
585
586 if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
587 return;
588
589 event = le16_to_cpu(mpi_reply->Event);
590
591 switch (event) {
592 case MPI2_EVENT_LOG_DATA:
593 desc = "Log Data";
594 break;
595 case MPI2_EVENT_STATE_CHANGE:
596 desc = "Status Change";
597 break;
598 case MPI2_EVENT_HARD_RESET_RECEIVED:
599 desc = "Hard Reset Received";
600 break;
601 case MPI2_EVENT_EVENT_CHANGE:
602 desc = "Event Change";
603 break;
604 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
605 desc = "Device Status Change";
606 break;
607 case MPI2_EVENT_IR_OPERATION_STATUS:
608 if (!ioc->hide_ir_msg)
609 desc = "IR Operation Status";
610 break;
611 case MPI2_EVENT_SAS_DISCOVERY:
612 {
613 Mpi2EventDataSasDiscovery_t *event_data =
614 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
615 pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
616 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
617 "start" : "stop");
618 if (event_data->DiscoveryStatus)
619 pr_info("discovery_status(0x%08x)",
620 le32_to_cpu(event_data->DiscoveryStatus));
621 pr_info("\n");
622 return;
623 }
624 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
625 desc = "SAS Broadcast Primitive";
626 break;
627 case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
628 desc = "SAS Init Device Status Change";
629 break;
630 case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
631 desc = "SAS Init Table Overflow";
632 break;
633 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
634 desc = "SAS Topology Change List";
635 break;
636 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
637 desc = "SAS Enclosure Device Status Change";
638 break;
639 case MPI2_EVENT_IR_VOLUME:
640 if (!ioc->hide_ir_msg)
641 desc = "IR Volume";
642 break;
643 case MPI2_EVENT_IR_PHYSICAL_DISK:
644 if (!ioc->hide_ir_msg)
645 desc = "IR Physical Disk";
646 break;
647 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
648 if (!ioc->hide_ir_msg)
649 desc = "IR Configuration Change List";
650 break;
651 case MPI2_EVENT_LOG_ENTRY_ADDED:
652 if (!ioc->hide_ir_msg)
653 desc = "Log Entry Added";
654 break;
655 case MPI2_EVENT_TEMP_THRESHOLD:
656 desc = "Temperature Threshold";
657 break;
658 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
659 desc = "Active cable exception";
660 break;
661 }
662
663 if (!desc)
664 return;
665
666 pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
667}
668
669/**
670 * _base_sas_log_info - verbose translation of firmware log info
671 * @ioc: per adapter object
672 * @log_info: log info
673 *
674 * Return nothing.
675 */
676static void
677_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
678{
679 union loginfo_type {
680 u32 loginfo;
681 struct {
682 u32 subcode:16;
683 u32 code:8;
684 u32 originator:4;
685 u32 bus_type:4;
686 } dw;
687 };
688 union loginfo_type sas_loginfo;
689 char *originator_str = NULL;
690
691 sas_loginfo.loginfo = log_info;
692 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
693 return;
694
695 /* each nexus loss loginfo */
696 if (log_info == 0x31170000)
697 return;
698
699 /* eat the loginfos associated with task aborts */
700 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
701 0x31140000 || log_info == 0x31130000))
702 return;
703
704 switch (sas_loginfo.dw.originator) {
705 case 0:
706 originator_str = "IOP";
707 break;
708 case 1:
709 originator_str = "PL";
710 break;
711 case 2:
712 if (!ioc->hide_ir_msg)
713 originator_str = "IR";
714 else
715 originator_str = "WarpDrive";
716 break;
717 }
718
719 pr_warn(MPT3SAS_FMT
720 "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
721 ioc->name, log_info,
722 originator_str, sas_loginfo.dw.code,
723 sas_loginfo.dw.subcode);
724}
725
726/**
727 * _base_display_reply_info - verbose translation of a reply's ioc_status and log info
728 * @ioc: per adapter object
729 * @smid: system request message index
730 * @msix_index: MSIX table index supplied by the OS
731 * @reply: reply message frame(lower 32bit addr)
732 *
733 * Return nothing.
734 */
735static void
736_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
737 u32 reply)
738{
739 MPI2DefaultReply_t *mpi_reply;
740 u16 ioc_status;
741 u32 loginfo = 0;
742
743 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
744 if (unlikely(!mpi_reply)) {
745 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
746 ioc->name, __FILE__, __LINE__, __func__);
747 return;
748 }
749 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
750
751 if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
752 (ioc->logging_level & MPT_DEBUG_REPLY)) {
753 _base_sas_ioc_info(ioc , mpi_reply,
754 mpt3sas_base_get_msg_frame(ioc, smid));
755 }
756
757 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
758 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
759 _base_sas_log_info(ioc, loginfo);
760 }
761
762 if (ioc_status || loginfo) {
763 ioc_status &= MPI2_IOCSTATUS_MASK;
764 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
765 }
766}
767
768/**
769 * mpt3sas_base_done - base internal command completion routine
770 * @ioc: per adapter object
771 * @smid: system request message index
772 * @msix_index: MSIX table index supplied by the OS
773 * @reply: reply message frame(lower 32bit addr)
774 *
775 * Return 1 meaning mf should be freed from _base_interrupt
776 * 0 means the mf is freed from this function.
777 */
778u8
779mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
780 u32 reply)
781{
782 MPI2DefaultReply_t *mpi_reply;
783
784 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
785 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
786 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
787
788 if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
789 return 1;
790
791 ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
792 if (mpi_reply) {
793 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
794 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
795 }
796 ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
797
798 complete(&ioc->base_cmds.done);
799 return 1;
800}
801
802/**
803 * _base_async_event - main callback handler for firmware async events
804 * @ioc: per adapter object
805 * @msix_index: MSIX table index supplied by the OS
806 * @reply: reply message frame(lower 32bit addr)
807 *
808 * Return 1 meaning mf should be freed from _base_interrupt
809 * 0 means the mf is freed from this function.
810 */
811static u8
812_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
813{
814 Mpi2EventNotificationReply_t *mpi_reply;
815 Mpi2EventAckRequest_t *ack_request;
816 u16 smid;
817 struct _event_ack_list *delayed_event_ack;
818
819 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
820 if (!mpi_reply)
821 return 1;
822 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
823 return 1;
824
825 _base_display_event_data(ioc, mpi_reply);
826
827 if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
828 goto out;
829 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
830 if (!smid) {
831 delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
832 GFP_ATOMIC);
833 if (!delayed_event_ack)
834 goto out;
835 INIT_LIST_HEAD(&delayed_event_ack->list);
836 delayed_event_ack->Event = mpi_reply->Event;
837 delayed_event_ack->EventContext = mpi_reply->EventContext;
838 list_add_tail(&delayed_event_ack->list,
839 &ioc->delayed_event_ack_list);
840 dewtprintk(ioc, pr_info(MPT3SAS_FMT
841 "DELAYED: EVENT ACK: event (0x%04x)\n",
842 ioc->name, le16_to_cpu(mpi_reply->Event)));
843 goto out;
844 }
845
846 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
847 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
848 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
849 ack_request->Event = mpi_reply->Event;
850 ack_request->EventContext = mpi_reply->EventContext;
851 ack_request->VF_ID = 0; /* TODO */
852 ack_request->VP_ID = 0;
853 mpt3sas_base_put_smid_default(ioc, smid);
854
855 out:
856
857 /* scsih callback handler */
858 mpt3sas_scsih_event_callback(ioc, msix_index, reply);
859
860 /* ctl callback handler */
861 mpt3sas_ctl_event_callback(ioc, msix_index, reply);
862
863 return 1;
864}
865
866/**
867 * _base_get_cb_idx - obtain the callback index
868 * @ioc: per adapter object
869 * @smid: system request message index
870 *
871 * Return callback index.
872 */
873static u8
874_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
875{
876 int i;
877 u8 cb_idx;
878
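	/*
	 * The smid address space is carved into three consecutive regions:
	 * SCSI IO trackers (1 .. hi_priority_smid - 1), high-priority
	 * trackers, then internal trackers up to hba_queue_depth; anything
	 * beyond that is invalid and yields the 0xFF sentinel.
	 */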
879 if (smid < ioc->hi_priority_smid) {
880 i = smid - 1;
881 cb_idx = ioc->scsi_lookup[i].cb_idx;
882 } else if (smid < ioc->internal_smid) {
883 i = smid - ioc->hi_priority_smid;
884 cb_idx = ioc->hpr_lookup[i].cb_idx;
885 } else if (smid <= ioc->hba_queue_depth) {
886 i = smid - ioc->internal_smid;
887 cb_idx = ioc->internal_lookup[i].cb_idx;
888 } else
889 cb_idx = 0xFF;
890 return cb_idx;
891}
892
893/**
894 * _base_mask_interrupts - disable interrupts
895 * @ioc: per adapter object
896 *
897 * Disabling ResetIRQ, Reply and Doorbell Interrupts
898 *
899 * Return nothing.
900 */
901static void
902_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
903{
904 u32 him_register;
905
906 ioc->mask_interrupts = 1;
907 him_register = readl(&ioc->chip->HostInterruptMask);
908 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
909 writel(him_register, &ioc->chip->HostInterruptMask);
910 readl(&ioc->chip->HostInterruptMask);
911}
912
913/**
914 * _base_unmask_interrupts - enable interrupts
915 * @ioc: per adapter object
916 *
917 * Enabling only Reply Interrupts
918 *
919 * Return nothing.
920 */
921static void
922_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
923{
924 u32 him_register;
925
926 him_register = readl(&ioc->chip->HostInterruptMask);
927 him_register &= ~MPI2_HIM_RIM;
928 writel(him_register, &ioc->chip->HostInterruptMask);
929 ioc->mask_interrupts = 0;
930}
931
932union reply_descriptor {
933 u64 word;
934 struct {
935 u32 low;
936 u32 high;
937 } u;
938};
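/*
 * Each entry on the reply post free queue is consumed as one 64 bit
 * descriptor; an unused (already processed) entry carries 0xFFFFFFFF in
 * both 32 bit halves, which is what _base_interrupt tests for before
 * handling a descriptor.
 */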
939
940/**
941 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
942 * @irq: irq number (not used)
943 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
944 * @r: pt_regs pointer (not used)
945 *
946 * Return IRQ_HANDLED if processed, else IRQ_NONE.
947 */
948static irqreturn_t
949_base_interrupt(int irq, void *bus_id)
950{
951 struct adapter_reply_queue *reply_q = bus_id;
952 union reply_descriptor rd;
953 u32 completed_cmds;
954 u8 request_desript_type;
955 u16 smid;
956 u8 cb_idx;
957 u32 reply;
958 u8 msix_index = reply_q->msix_index;
959 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
960 Mpi2ReplyDescriptorsUnion_t *rpf;
961 u8 rc;
962
963 if (ioc->mask_interrupts)
964 return IRQ_NONE;
965
966 if (!atomic_add_unless(&reply_q->busy, 1, 1))
967 return IRQ_NONE;
968
969 rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
970 request_desript_type = rpf->Default.ReplyFlags
971 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
972 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
973 atomic_dec(&reply_q->busy);
974 return IRQ_NONE;
975 }
976
977 completed_cmds = 0;
978 cb_idx = 0xFF;
979 do {
980 rd.word = le64_to_cpu(rpf->Words);
981 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
982 goto out;
983 reply = 0;
984 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
985 if (request_desript_type ==
986 MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
987 request_desript_type ==
988 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
989 cb_idx = _base_get_cb_idx(ioc, smid);
990 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
991 (likely(mpt_callbacks[cb_idx] != NULL))) {
992 rc = mpt_callbacks[cb_idx](ioc, smid,
993 msix_index, 0);
994 if (rc)
995 mpt3sas_base_free_smid(ioc, smid);
996 }
997 } else if (request_desript_type ==
998 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
999 reply = le32_to_cpu(
1000 rpf->AddressReply.ReplyFrameAddress);
1001 if (reply > ioc->reply_dma_max_address ||
1002 reply < ioc->reply_dma_min_address)
1003 reply = 0;
1004 if (smid) {
1005 cb_idx = _base_get_cb_idx(ioc, smid);
1006 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1007 (likely(mpt_callbacks[cb_idx] != NULL))) {
1008 rc = mpt_callbacks[cb_idx](ioc, smid,
1009 msix_index, reply);
1010 if (reply)
1011 _base_display_reply_info(ioc,
1012 smid, msix_index, reply);
1013 if (rc)
1014 mpt3sas_base_free_smid(ioc,
1015 smid);
1016 }
1017 } else {
1018 _base_async_event(ioc, msix_index, reply);
1019 }
1020
1021 /* reply free queue handling */
1022 if (reply) {
1023 ioc->reply_free_host_index =
1024 (ioc->reply_free_host_index ==
1025 (ioc->reply_free_queue_depth - 1)) ?
1026 0 : ioc->reply_free_host_index + 1;
1027 ioc->reply_free[ioc->reply_free_host_index] =
1028 cpu_to_le32(reply);
1029 wmb();
1030 writel(ioc->reply_free_host_index,
1031 &ioc->chip->ReplyFreeHostIndex);
1032 }
1033 }
1034
1035 rpf->Words = cpu_to_le64(ULLONG_MAX);
1036 reply_q->reply_post_host_index =
1037 (reply_q->reply_post_host_index ==
1038 (ioc->reply_post_queue_depth - 1)) ? 0 :
1039 reply_q->reply_post_host_index + 1;
1040 request_desript_type =
1041 reply_q->reply_post_free[reply_q->reply_post_host_index].
1042 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1043 completed_cmds++;
1044 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1045 goto out;
1046 if (!reply_q->reply_post_host_index)
1047 rpf = reply_q->reply_post_free;
1048 else
1049 rpf++;
1050 } while (1);
1051
1052 out:
1053
1054 if (!completed_cmds) {
1055 atomic_dec(&reply_q->busy);
1056 return IRQ_NONE;
1057 }
1058
1059 wmb();
1060 if (ioc->is_warpdrive) {
1061 writel(reply_q->reply_post_host_index,
1062 ioc->reply_post_host_index[msix_index]);
1063 atomic_dec(&reply_q->busy);
1064 return IRQ_HANDLED;
1065 }
1066
1067 /* Update Reply Post Host Index.
1068 * For those HBA's which support combined reply queue feature
1069 * 1. Get the correct Supplemental Reply Post Host Index Register.
1070 * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1071 * Index Register address bank i.e replyPostRegisterIndex[],
1072 * 2. Then update this register with new reply host index value
1073 * in ReplyPostIndex field and the MSIxIndex field with
1074 * msix_index value reduced to a value between 0 and 7,
1075 * using a modulo 8 operation. Since each Supplemental Reply Post
1076 * Host Index Register supports 8 MSI-X vectors.
1077 *
1078 * For other HBA's just update the Reply Post Host Index register with
1079 * new reply host index value in ReplyPostIndex Field and msix_index
1080 * value in MSIxIndex field.
1081 */
1082 if (ioc->msix96_vector)
1083 writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
1084 MPI2_RPHI_MSIX_INDEX_SHIFT),
1085 ioc->replyPostRegisterIndex[msix_index/8]);
1086 else
1087 writel(reply_q->reply_post_host_index | (msix_index <<
1088 MPI2_RPHI_MSIX_INDEX_SHIFT),
1089 &ioc->chip->ReplyPostHostIndex);
1090 atomic_dec(&reply_q->busy);
1091 return IRQ_HANDLED;
1092}
1093
1094/**
1095 * _base_is_controller_msix_enabled - check whether the controller supports multi-reply queues
1096 * @ioc: per adapter object
1097 *
1098 */
1099static inline int
1100_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1101{
1102 return (ioc->facts.IOCCapabilities &
1103 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1104}
1105
1106/**
1107 * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
1108 * @ioc: per adapter object
1109 * Context: ISR context
1110 *
1111 * Called when a Task Management request has completed. We want
1112 * to flush the other reply queues so all the outstanding IO has been
1113 * completed back to OS before we process the TM completion.
1114 *
1115 * Return nothing.
1116 */
1117void
1118mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1119{
1120 struct adapter_reply_queue *reply_q;
1121
1122 /* If MSIX capability is turned off
1123 * then multi-queues are not enabled
1124 */
1125 if (!_base_is_controller_msix_enabled(ioc))
1126 return;
1127
1128 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1129 if (ioc->shost_recovery)
1130 return;
1131 /* TMs are on msix_index == 0 */
1132 if (reply_q->msix_index == 0)
1133 continue;
1134 _base_interrupt(reply_q->vector, (void *)reply_q);
1135 }
1136}
1137
1138/**
1139 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1140 * @cb_idx: callback index
1141 *
1142 * Return nothing.
1143 */
1144void
1145mpt3sas_base_release_callback_handler(u8 cb_idx)
1146{
1147 mpt_callbacks[cb_idx] = NULL;
1148}
1149
1150/**
1151 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1152 * @cb_func: callback function
1153 *
1154 * Returns cb_idx, the index at which @cb_func was registered.
1155 */
1156u8
1157mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1158{
1159 u8 cb_idx;
1160
1161 for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1162 if (mpt_callbacks[cb_idx] == NULL)
1163 break;
1164
1165 mpt_callbacks[cb_idx] = cb_func;
1166 return cb_idx;
1167}
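/*
 * Illustrative usage (a sketch, not taken verbatim from this file): each
 * sub-module registers its completion routine once at load time and saves
 * the returned index, which it later passes when allocating smids so that
 * _base_interrupt can route the reply back to that routine:
 *
 *	cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
 *	...
 *	smid = mpt3sas_base_get_smid_scsiio(ioc, cb_idx, scmd);
 *
 * The helper names _scsih_io_done and mpt3sas_base_get_smid_scsiio above
 * are assumptions used only for illustration.
 */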
1168
1169/**
1170 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1171 *
1172 * Return nothing.
1173 */
1174void
1175mpt3sas_base_initialize_callback_handler(void)
1176{
1177 u8 cb_idx;
1178
1179 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1180 mpt3sas_base_release_callback_handler(cb_idx);
1181}
1182
1183
1184/**
1185 * _base_build_zero_len_sge - build zero length sg entry
1186 * @ioc: per adapter object
1187 * @paddr: virtual address for SGE
1188 *
1189 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1190 * something to use if the target device goes brain dead and tries
1191 * to send data even when none is asked for.
1192 *
1193 * Return nothing.
1194 */
1195static void
1196_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1197{
1198 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1199 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1200 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1201 MPI2_SGE_FLAGS_SHIFT);
1202 ioc->base_add_sg_single(paddr, flags_length, -1);
1203}
1204
1205/**
1206 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1207 * @paddr: virtual address for SGE
1208 * @flags_length: SGE flags and data transfer length
1209 * @dma_addr: Physical address
1210 *
1211 * Return nothing.
1212 */
1213static void
1214_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1215{
1216 Mpi2SGESimple32_t *sgel = paddr;
1217
1218 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1219 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1220 sgel->FlagsLength = cpu_to_le32(flags_length);
1221 sgel->Address = cpu_to_le32(dma_addr);
1222}
1223
1224
1225/**
1226 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1227 * @paddr: virtual address for SGE
1228 * @flags_length: SGE flags and data transfer length
1229 * @dma_addr: Physical address
1230 *
1231 * Return nothing.
1232 */
1233static void
1234_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1235{
1236 Mpi2SGESimple64_t *sgel = paddr;
1237
1238 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1239 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1240 sgel->FlagsLength = cpu_to_le32(flags_length);
1241 sgel->Address = cpu_to_le64(dma_addr);
1242}
1243
1244/**
1245 * _base_get_chain_buffer_tracker - obtain chain tracker
1246 * @ioc: per adapter object
1247 * @smid: smid associated to an IO request
1248 *
1249 * Returns chain tracker(from ioc->free_chain_list)
1250 */
1251static struct chain_tracker *
1252_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1253{
1254 struct chain_tracker *chain_req;
1255 unsigned long flags;
1256
1257 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1258 if (list_empty(&ioc->free_chain_list)) {
1259 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1260 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1261 "chain buffers not available\n", ioc->name));
1262 return NULL;
1263 }
1264 chain_req = list_entry(ioc->free_chain_list.next,
1265 struct chain_tracker, tracker_list);
1266 list_del_init(&chain_req->tracker_list);
1267 list_add_tail(&chain_req->tracker_list,
1268 &ioc->scsi_lookup[smid - 1].chain_list);
1269 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1270 return chain_req;
1271}
1272
1273
1274/**
1275 * _base_build_sg - build generic sg
1276 * @ioc: per adapter object
1277 * @psge: virtual address for SGE
1278 * @data_out_dma: physical address for WRITES
1279 * @data_out_sz: data xfer size for WRITES
1280 * @data_in_dma: physical address for READS
1281 * @data_in_sz: data xfer size for READS
1282 *
1283 * Return nothing.
1284 */
1285static void
1286_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1287 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1288 size_t data_in_sz)
1289{
1290 u32 sgl_flags;
1291
1292 if (!data_out_sz && !data_in_sz) {
1293 _base_build_zero_len_sge(ioc, psge);
1294 return;
1295 }
1296
1297 if (data_out_sz && data_in_sz) {
1298 /* WRITE sgel first */
1299 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1300 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1301 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1302 ioc->base_add_sg_single(psge, sgl_flags |
1303 data_out_sz, data_out_dma);
1304
1305 /* incr sgel */
1306 psge += ioc->sge_size;
1307
1308 /* READ sgel last */
1309 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1310 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1311 MPI2_SGE_FLAGS_END_OF_LIST);
1312 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1313 ioc->base_add_sg_single(psge, sgl_flags |
1314 data_in_sz, data_in_dma);
1315 } else if (data_out_sz) /* WRITE */ {
1316 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1317 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1318 MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1319 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1320 ioc->base_add_sg_single(psge, sgl_flags |
1321 data_out_sz, data_out_dma);
1322 } else if (data_in_sz) /* READ */ {
1323 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1324 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1325 MPI2_SGE_FLAGS_END_OF_LIST);
1326 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1327 ioc->base_add_sg_single(psge, sgl_flags |
1328 data_in_sz, data_in_dma);
1329 }
1330}
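/*
 * Illustrative call (a sketch only; the caller is assumed to have already
 * DMA-mapped both buffers, and the routine is assumed to be reached through
 * a build_sg style method pointer on the ioc):
 *
 *	ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
 *	    data_in_dma, data_in_sz);
 *
 * This emits a HOST_TO_IOC (write) simple element followed by a terminating
 * read element, a single terminating element for a one-way transfer, or a
 * zero length SGE when no data movement is requested.
 */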
1331
1332/* IEEE format sgls */
1333
1334/**
1335 * _base_add_sg_single_ieee - add sg element for IEEE format
1336 * @paddr: virtual address for SGE
1337 * @flags: SGE flags
1338 * @chain_offset: number of 128 byte elements from start of segment
1339 * @length: data transfer length
1340 * @dma_addr: Physical address
1341 *
1342 * Return nothing.
1343 */
1344static void
1345_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
1346 dma_addr_t dma_addr)
1347{
1348 Mpi25IeeeSgeChain64_t *sgel = paddr;
1349
1350 sgel->Flags = flags;
1351 sgel->NextChainOffset = chain_offset;
1352 sgel->Length = cpu_to_le32(length);
1353 sgel->Address = cpu_to_le64(dma_addr);
1354}
1355
1356/**
1357 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
1358 * @ioc: per adapter object
1359 * @paddr: virtual address for SGE
1360 *
1361 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1362 * something to use if the target device goes brain dead and tries
1363 * to send data even when none is asked for.
1364 *
1365 * Return nothing.
1366 */
1367static void
1368_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1369{
1370 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1371 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
1372 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
1373
1374 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
1375}
1376
1377/**
1378 * _base_build_sg_scmd - main sg creation routine
1379 * @ioc: per adapter object
1380 * @scmd: scsi command
1381 * @smid: system request message index
1382 * Context: none.
1383 *
1384 * The main routine that builds scatter gather table from a given
1385 * scsi request sent via the .queuecommand main handler.
1386 *
1387 * Returns 0 success, anything else error
1388 */
1389static int
1390_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1391 struct scsi_cmnd *scmd, u16 smid)
1392{
1393 Mpi2SCSIIORequest_t *mpi_request;
1394 dma_addr_t chain_dma;
1395 struct scatterlist *sg_scmd;
1396 void *sg_local, *chain;
1397 u32 chain_offset;
1398 u32 chain_length;
1399 u32 chain_flags;
1400 int sges_left;
1401 u32 sges_in_segment;
1402 u32 sgl_flags;
1403 u32 sgl_flags_last_element;
1404 u32 sgl_flags_end_buffer;
1405 struct chain_tracker *chain_req;
1406
1407 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1408
1409 /* init scatter gather flags */
1410 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
1411 if (scmd->sc_data_direction == DMA_TO_DEVICE)
1412 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
1413 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
1414 << MPI2_SGE_FLAGS_SHIFT;
1415 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
1416 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
1417 << MPI2_SGE_FLAGS_SHIFT;
1418 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1419
1420 sg_scmd = scsi_sglist(scmd);
1421 sges_left = scsi_dma_map(scmd);
1422 if (sges_left < 0) {
1423 sdev_printk(KERN_ERR, scmd->device,
1424 "pci_map_sg failed: request for %d bytes!\n",
1425 scsi_bufflen(scmd));
1426 return -ENOMEM;
1427 }
1428
1429 sg_local = &mpi_request->SGL;
1430 sges_in_segment = ioc->max_sges_in_main_message;
1431 if (sges_left <= sges_in_segment)
1432 goto fill_in_last_segment;
1433
1434 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
1435 (sges_in_segment * ioc->sge_size))/4;
1436
1437 /* fill in main message segment when there is a chain following */
1438 while (sges_in_segment) {
1439 if (sges_in_segment == 1)
1440 ioc->base_add_sg_single(sg_local,
1441 sgl_flags_last_element | sg_dma_len(sg_scmd),
1442 sg_dma_address(sg_scmd));
1443 else
1444 ioc->base_add_sg_single(sg_local, sgl_flags |
1445 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1446 sg_scmd = sg_next(sg_scmd);
1447 sg_local += ioc->sge_size;
1448 sges_left--;
1449 sges_in_segment--;
1450 }
1451
1452 /* initializing the chain flags and pointers */
1453 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
1454 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1455 if (!chain_req)
1456 return -1;
1457 chain = chain_req->chain_buffer;
1458 chain_dma = chain_req->chain_buffer_dma;
1459 do {
1460 sges_in_segment = (sges_left <=
1461 ioc->max_sges_in_chain_message) ? sges_left :
1462 ioc->max_sges_in_chain_message;
1463 chain_offset = (sges_left == sges_in_segment) ?
1464 0 : (sges_in_segment * ioc->sge_size)/4;
1465 chain_length = sges_in_segment * ioc->sge_size;
1466 if (chain_offset) {
1467 chain_offset = chain_offset <<
1468 MPI2_SGE_CHAIN_OFFSET_SHIFT;
1469 chain_length += ioc->sge_size;
1470 }
1471 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
1472 chain_length, chain_dma);
1473 sg_local = chain;
1474 if (!chain_offset)
1475 goto fill_in_last_segment;
1476
1477 /* fill in chain segments */
1478 while (sges_in_segment) {
1479 if (sges_in_segment == 1)
1480 ioc->base_add_sg_single(sg_local,
1481 sgl_flags_last_element |
1482 sg_dma_len(sg_scmd),
1483 sg_dma_address(sg_scmd));
1484 else
1485 ioc->base_add_sg_single(sg_local, sgl_flags |
1486 sg_dma_len(sg_scmd),
1487 sg_dma_address(sg_scmd));
1488 sg_scmd = sg_next(sg_scmd);
1489 sg_local += ioc->sge_size;
1490 sges_left--;
1491 sges_in_segment--;
1492 }
1493
1494 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1495 if (!chain_req)
1496 return -1;
1497 chain = chain_req->chain_buffer;
1498 chain_dma = chain_req->chain_buffer_dma;
1499 } while (1);
1500
1501
1502 fill_in_last_segment:
1503
1504 /* fill the last segment */
1505 while (sges_left) {
1506 if (sges_left == 1)
1507 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
1508 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1509 else
1510 ioc->base_add_sg_single(sg_local, sgl_flags |
1511 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1512 sg_scmd = sg_next(sg_scmd);
1513 sg_local += ioc->sge_size;
1514 sges_left--;
1515 }
1516
1517 return 0;
1518}
1519
1520/**
1521 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
1522 * @ioc: per adapter object
1523 * @scmd: scsi command
1524 * @smid: system request message index
1525 * Context: none.
1526 *
1527 * The main routine that builds scatter gather table from a given
1528 * scsi request sent via the .queuecommand main handler.
1529 *
1530 * Returns 0 success, anything else error
1531 */
1532static int
1533_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1534 struct scsi_cmnd *scmd, u16 smid)
1535{
1536 Mpi2SCSIIORequest_t *mpi_request;
1537 dma_addr_t chain_dma;
1538 struct scatterlist *sg_scmd;
1539 void *sg_local, *chain;
1540 u32 chain_offset;
1541 u32 chain_length;
1542 int sges_left;
1543 u32 sges_in_segment;
1544 u8 simple_sgl_flags;
1545 u8 simple_sgl_flags_last;
1546 u8 chain_sgl_flags;
1547 struct chain_tracker *chain_req;
1548
1549 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1550
1551 /* init scatter gather flags */
1552 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1553 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1554 simple_sgl_flags_last = simple_sgl_flags |
1555 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1556 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1557 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1558
1559 sg_scmd = scsi_sglist(scmd);
1560 sges_left = scsi_dma_map(scmd);
1561 if (sges_left < 0) {
1562 sdev_printk(KERN_ERR, scmd->device,
1563 "pci_map_sg failed: request for %d bytes!\n",
1564 scsi_bufflen(scmd));
1565 return -ENOMEM;
1566 }
1567
1568 sg_local = &mpi_request->SGL;
1569 sges_in_segment = (ioc->request_sz -
1570 offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
1571 if (sges_left <= sges_in_segment)
1572 goto fill_in_last_segment;
1573
1574 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
1575 (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
1576
1577 /* fill in main message segment when there is a chain following */
1578 while (sges_in_segment > 1) {
1579 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1580 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1581 sg_scmd = sg_next(sg_scmd);
1582 sg_local += ioc->sge_size_ieee;
1583 sges_left--;
1584 sges_in_segment--;
1585 }
1586
1587 /* initializing the pointers */
1588 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1589 if (!chain_req)
1590 return -1;
1591 chain = chain_req->chain_buffer;
1592 chain_dma = chain_req->chain_buffer_dma;
1593 do {
1594 sges_in_segment = (sges_left <=
1595 ioc->max_sges_in_chain_message) ? sges_left :
1596 ioc->max_sges_in_chain_message;
1597 chain_offset = (sges_left == sges_in_segment) ?
1598 0 : sges_in_segment;
1599 chain_length = sges_in_segment * ioc->sge_size_ieee;
1600 if (chain_offset)
1601 chain_length += ioc->sge_size_ieee;
1602 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
1603 chain_offset, chain_length, chain_dma);
1604
1605 sg_local = chain;
1606 if (!chain_offset)
1607 goto fill_in_last_segment;
1608
1609 /* fill in chain segments */
1610 while (sges_in_segment) {
1611 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1612 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1613 sg_scmd = sg_next(sg_scmd);
1614 sg_local += ioc->sge_size_ieee;
1615 sges_left--;
1616 sges_in_segment--;
1617 }
1618
1619 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1620 if (!chain_req)
1621 return -1;
1622 chain = chain_req->chain_buffer;
1623 chain_dma = chain_req->chain_buffer_dma;
1624 } while (1);
1625
1626
1627 fill_in_last_segment:
1628
1629 /* fill the last segment */
1630 while (sges_left > 0) {
1631 if (sges_left == 1)
1632 _base_add_sg_single_ieee(sg_local,
1633 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
1634 sg_dma_address(sg_scmd));
1635 else
1636 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1637 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1638 sg_scmd = sg_next(sg_scmd);
1639 sg_local += ioc->sge_size_ieee;
1640 sges_left--;
1641 }
1642
1643 return 0;
1644}
1645
1646/**
1647 * _base_build_sg_ieee - build generic sg for IEEE format
1648 * @ioc: per adapter object
1649 * @psge: virtual address for SGE
1650 * @data_out_dma: physical address for WRITES
1651 * @data_out_sz: data xfer size for WRITES
1652 * @data_in_dma: physical address for READS
1653 * @data_in_sz: data xfer size for READS
1654 *
1655 * Return nothing.
1656 */
1657static void
1658_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
1659 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1660 size_t data_in_sz)
1661{
1662 u8 sgl_flags;
1663
1664 if (!data_out_sz && !data_in_sz) {
1665 _base_build_zero_len_sge_ieee(ioc, psge);
1666 return;
1667 }
1668
1669 if (data_out_sz && data_in_sz) {
1670 /* WRITE sgel first */
1671 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1672 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1673 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1674 data_out_dma);
1675
1676 /* incr sgel */
1677 psge += ioc->sge_size_ieee;
1678
1679 /* READ sgel last */
1680 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1681 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1682 data_in_dma);
1683 } else if (data_out_sz) /* WRITE */ {
1684 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1685 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1686 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1687 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1688 data_out_dma);
1689 } else if (data_in_sz) /* READ */ {
1690 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1691 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1692 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1693 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1694 data_in_dma);
1695 }
1696}
1697
1698#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
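/*
 * si_meminfo() reports totalram in pages, so shifting by (PAGE_SHIFT - 10)
 * converts a page count to kilobytes (e.g. with 4 KiB pages, x << 2).
 */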
1699
1700/**
1701 * _base_config_dma_addressing - set dma addressing
1702 * @ioc: per adapter object
1703 * @pdev: PCI device struct
1704 *
1705 * Returns 0 for success, non-zero for failure.
1706 */
1707static int
1708_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
1709{
1710 struct sysinfo s;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301711 u64 consistent_dma_mask;
1712
1713 if (ioc->dma_mask)
1714 consistent_dma_mask = DMA_BIT_MASK(64);
1715 else
1716 consistent_dma_mask = DMA_BIT_MASK(32);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301717
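	/*
	 * On 64-bit capable platforms try a 64-bit streaming DMA mask first,
	 * falling back to 32 bits below.  The coherent (consistent) mask may
	 * initially stay at 32 bits (ioc->dma_mask not yet set) and is
	 * widened later via _base_change_consistent_dma_mask() once the
	 * reply post pools have been allocated.
	 */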
1718 if (sizeof(dma_addr_t) > 4) {
1719 const uint64_t required_mask =
1720 dma_get_required_mask(&pdev->dev);
1721 if ((required_mask > DMA_BIT_MASK(32)) &&
1722 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301723 !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301724 ioc->base_add_sg_single = &_base_add_sg_single_64;
1725 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301726 ioc->dma_mask = 64;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301727 goto out;
1728 }
1729 }
1730
1731 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1732 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1733 ioc->base_add_sg_single = &_base_add_sg_single_32;
1734 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301735 ioc->dma_mask = 32;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301736 } else
1737 return -ENODEV;
1738
1739 out:
1740 si_meminfo(&s);
1741 pr_info(MPT3SAS_FMT
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301742 "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
1743 ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301744
1745 return 0;
1746}
1747
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301748static int
1749_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
1750 struct pci_dev *pdev)
1751{
1752 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1753 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1754 return -ENODEV;
1755 }
1756 return 0;
1757}
1758
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301759/**
 1760 * _base_check_enable_msix - checks MSIX capable.
1761 * @ioc: per adapter object
1762 *
1763 * Check to see if card is capable of MSIX, and set number
1764 * of available msix vectors
1765 */
1766static int
1767_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1768{
1769 int base;
1770 u16 message_control;
1771
Sreekanth Reddy42081172015-11-11 17:30:26 +05301772	/* Check whether the controller is a SAS2008 B0 controller;
 1773	 * if so, use IO-APIC instead of MSI-X
1774 */
1775 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
1776 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
1777 return -EINVAL;
1778 }
1779
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301780 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1781 if (!base) {
1782 dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
1783 ioc->name));
1784 return -EINVAL;
1785 }
1786
1787 /* get msix vector count */
Sreekanth Reddy42081172015-11-11 17:30:26 +05301788 /* NUMA_IO not supported for older controllers */
1789 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1790 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1791 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1792 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1793 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1794 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1795 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1796 ioc->msix_vector_count = 1;
1797 else {
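		/* The MSI-X Table Size field in Message Control is encoded as
		 * N-1, hence the +1 below to get the usable vector count.
		 */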
1798 pci_read_config_word(ioc->pdev, base + 2, &message_control);
1799 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1800 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301801 dinitprintk(ioc, pr_info(MPT3SAS_FMT
1802 "msix is supported, vector_count(%d)\n",
1803 ioc->name, ioc->msix_vector_count));
1804 return 0;
1805}
1806
1807/**
1808 * _base_free_irq - free irq
1809 * @ioc: per adapter object
1810 *
1811 * Freeing respective reply_queue from the list.
1812 */
1813static void
1814_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
1815{
1816 struct adapter_reply_queue *reply_q, *next;
1817
1818 if (list_empty(&ioc->reply_queue_list))
1819 return;
1820
1821 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1822 list_del(&reply_q->list);
Suganath Prabu Subramani64038302016-02-08 22:13:39 +05301823 if (smp_affinity_enable) {
1824 irq_set_affinity_hint(reply_q->vector, NULL);
1825 free_cpumask_var(reply_q->affinity_hint);
1826 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301827 free_irq(reply_q->vector, reply_q);
1828 kfree(reply_q);
1829 }
1830}
1831
1832/**
1833 * _base_request_irq - request irq
1834 * @ioc: per adapter object
1835 * @index: msix index into vector table
1836 * @vector: irq vector
1837 *
1838 * Inserting respective reply_queue into the list.
1839 */
1840static int
1841_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
1842{
1843 struct adapter_reply_queue *reply_q;
1844 int r;
1845
1846 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1847 if (!reply_q) {
1848 pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
1849 ioc->name, (int)sizeof(struct adapter_reply_queue));
1850 return -ENOMEM;
1851 }
1852 reply_q->ioc = ioc;
1853 reply_q->msix_index = index;
1854 reply_q->vector = vector;
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301855
Suganath Prabu Subramani64038302016-02-08 22:13:39 +05301856 if (smp_affinity_enable) {
1857 if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
1858 kfree(reply_q);
1859 return -ENOMEM;
1860 }
Suganath Prabu Subramani64038302016-02-08 22:13:39 +05301861 }
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301862
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301863 atomic_set(&reply_q->busy, 0);
1864 if (ioc->msix_enable)
1865 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
Sreekanth Reddyc84b06a2015-11-11 17:30:35 +05301866 ioc->driver_name, ioc->id, index);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301867 else
1868 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
Sreekanth Reddyc84b06a2015-11-11 17:30:35 +05301869 ioc->driver_name, ioc->id);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301870 r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1871 reply_q);
1872 if (r) {
1873 pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
1874 reply_q->name, vector);
Suganath Prabu Subramani64038302016-02-08 22:13:39 +05301875 free_cpumask_var(reply_q->affinity_hint);
Suganath prabu Subramanida3cec22016-02-11 15:02:55 +05301876 kfree(reply_q);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301877 return -EBUSY;
1878 }
1879
1880 INIT_LIST_HEAD(&reply_q->list);
1881 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1882 return 0;
1883}
1884
1885/**
1886 * _base_assign_reply_queues - assigning msix index for each cpu
1887 * @ioc: per adapter object
1888 *
 1889 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
 1890 *
 1891 * It would be nice if we could call irq_set_affinity, however it is not
 1892 * an exported symbol
1893 */
1894static void
1895_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1896{
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001897 unsigned int cpu, nr_cpus, nr_msix, index = 0;
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301898 struct adapter_reply_queue *reply_q;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301899
1900 if (!_base_is_controller_msix_enabled(ioc))
1901 return;
1902
1903 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1904
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001905 nr_cpus = num_online_cpus();
1906 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
1907 ioc->facts.MaxMSIxVectors);
1908 if (!nr_msix)
1909 return;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301910
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001911 cpu = cpumask_first(cpu_online_mask);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301912
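	/*
	 * Spread the online CPUs as evenly as possible over the reply queues.
	 * Illustrative example: with 8 online CPUs and 3 MSI-X vectors,
	 * group = 8 / 3 = 2 and the first 8 % 3 = 2 queues each take one
	 * extra CPU, giving CPU groups of 3, 3 and 2.
	 */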
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301913 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1914
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001915 unsigned int i, group = nr_cpus / nr_msix;
1916
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301917 if (cpu >= nr_cpus)
1918 break;
1919
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001920 if (index < nr_cpus % nr_msix)
1921 group++;
1922
1923 for (i = 0 ; i < group ; i++) {
1924 ioc->cpu_msix_table[cpu] = index;
Suganath Prabu Subramani64038302016-02-08 22:13:39 +05301925 if (smp_affinity_enable)
1926 cpumask_or(reply_q->affinity_hint,
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301927 reply_q->affinity_hint, get_cpu_mask(cpu));
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001928 cpu = cpumask_next(cpu, cpu_online_mask);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301929 }
Suganath Prabu Subramani64038302016-02-08 22:13:39 +05301930 if (smp_affinity_enable)
1931 if (irq_set_affinity_hint(reply_q->vector,
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301932 reply_q->affinity_hint))
Suganath Prabu Subramani64038302016-02-08 22:13:39 +05301933 dinitprintk(ioc, pr_info(MPT3SAS_FMT
1934 "Err setting affinity hint to irq vector %d\n",
1935 ioc->name, reply_q->vector));
Martin K. Petersen91b265b2014-01-03 19:16:56 -05001936 index++;
Sreekanth Reddy14b31142015-01-12 11:39:03 +05301937 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301938}
1939
1940/**
1941 * _base_disable_msix - disables msix
1942 * @ioc: per adapter object
1943 *
1944 */
1945static void
1946_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
1947{
1948 if (!ioc->msix_enable)
1949 return;
1950 pci_disable_msix(ioc->pdev);
1951 ioc->msix_enable = 0;
1952}
1953
1954/**
 1955 * _base_enable_msix - enables msix, falling back to io_apic on failure
1956 * @ioc: per adapter object
1957 *
1958 */
1959static int
1960_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1961{
1962 struct msix_entry *entries, *a;
1963 int r;
1964 int i;
1965 u8 try_msix = 0;
1966
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301967 if (msix_disable == -1 || msix_disable == 0)
1968 try_msix = 1;
1969
1970 if (!try_msix)
1971 goto try_ioapic;
1972
1973 if (_base_check_enable_msix(ioc) != 0)
1974 goto try_ioapic;
1975
1976 ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1977 ioc->msix_vector_count);
1978
Sreekanth Reddy9c500062013-08-14 18:23:20 +05301979 printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
1980 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
1981 ioc->cpu_count, max_msix_vectors);
1982
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301983 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1984 max_msix_vectors = 8;
1985
Sreekanth Reddy9c500062013-08-14 18:23:20 +05301986 if (max_msix_vectors > 0) {
1987 ioc->reply_queue_count = min_t(int, max_msix_vectors,
1988 ioc->reply_queue_count);
1989 ioc->msix_vector_count = ioc->reply_queue_count;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05301990 } else if (max_msix_vectors == 0)
1991 goto try_ioapic;
Sreekanth Reddy9c500062013-08-14 18:23:20 +05301992
Suganath Prabu Subramani64038302016-02-08 22:13:39 +05301993 if (ioc->msix_vector_count < ioc->cpu_count)
1994 smp_affinity_enable = 0;
1995
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05301996 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1997 GFP_KERNEL);
1998 if (!entries) {
1999 dfailprintk(ioc, pr_info(MPT3SAS_FMT
2000 "kcalloc failed @ at %s:%d/%s() !!!\n",
2001 ioc->name, __FILE__, __LINE__, __func__));
2002 goto try_ioapic;
2003 }
2004
2005 for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
2006 a->entry = i;
2007
Alexander Gordeev6bfa6902014-08-18 08:01:46 +02002008 r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302009 if (r) {
2010 dfailprintk(ioc, pr_info(MPT3SAS_FMT
Alexander Gordeev6bfa6902014-08-18 08:01:46 +02002011 "pci_enable_msix_exact failed (r=%d) !!!\n",
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302012 ioc->name, r));
2013 kfree(entries);
2014 goto try_ioapic;
2015 }
2016
2017 ioc->msix_enable = 1;
2018 for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
2019 r = _base_request_irq(ioc, i, a->vector);
2020 if (r) {
2021 _base_free_irq(ioc);
2022 _base_disable_msix(ioc);
2023 kfree(entries);
2024 goto try_ioapic;
2025 }
2026 }
2027
2028 kfree(entries);
2029 return 0;
2030
 2031/* fall back to io_apic interrupt routing */
2032 try_ioapic:
2033
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05302034 ioc->reply_queue_count = 1;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302035 r = _base_request_irq(ioc, 0, ioc->pdev->irq);
2036
2037 return r;
2038}
2039
2040/**
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05302041 * mpt3sas_base_unmap_resources - free controller resources
2042 * @ioc: per adapter object
2043 */
2044void
2045mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2046{
2047 struct pci_dev *pdev = ioc->pdev;
2048
2049 dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
2050 ioc->name, __func__));
2051
2052 _base_free_irq(ioc);
2053 _base_disable_msix(ioc);
2054
Tomas Henzl5f985d82015-12-23 14:21:47 +01002055 if (ioc->msix96_vector) {
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05302056 kfree(ioc->replyPostRegisterIndex);
Tomas Henzl5f985d82015-12-23 14:21:47 +01002057 ioc->replyPostRegisterIndex = NULL;
2058 }
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05302059
2060 if (ioc->chip_phys) {
2061 iounmap(ioc->chip);
2062 ioc->chip_phys = 0;
2063 }
2064
2065 if (pci_is_enabled(pdev)) {
2066 pci_release_selected_regions(ioc->pdev, ioc->bars);
2067 pci_disable_pcie_error_reporting(pdev);
2068 pci_disable_device(pdev);
2069 }
2070}
2071
2072/**
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302073 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
2074 * @ioc: per adapter object
2075 *
2076 * Returns 0 for success, non-zero for failure.
2077 */
2078int
2079mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2080{
2081 struct pci_dev *pdev = ioc->pdev;
2082 u32 memap_sz;
2083 u32 pio_sz;
2084 int i, r = 0;
2085 u64 pio_chip = 0;
2086 u64 chip_phys = 0;
2087 struct adapter_reply_queue *reply_q;
2088
2089 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
2090 ioc->name, __func__));
2091
2092 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2093 if (pci_enable_device_mem(pdev)) {
2094 pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
2095 ioc->name);
Joe Lawrencecf9bd21a2013-08-08 16:45:39 -04002096 ioc->bars = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302097 return -ENODEV;
2098 }
2099
2100
2101 if (pci_request_selected_regions(pdev, ioc->bars,
Sreekanth Reddyc84b06a2015-11-11 17:30:35 +05302102 ioc->driver_name)) {
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302103 pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
2104 ioc->name);
Joe Lawrencecf9bd21a2013-08-08 16:45:39 -04002105 ioc->bars = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302106 r = -ENODEV;
2107 goto out_fail;
2108 }
2109
2110/* AER (Advanced Error Reporting) hooks */
2111 pci_enable_pcie_error_reporting(pdev);
2112
2113 pci_set_master(pdev);
2114
2115
2116 if (_base_config_dma_addressing(ioc, pdev) != 0) {
2117 pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
2118 ioc->name, pci_name(pdev));
2119 r = -ENODEV;
2120 goto out_fail;
2121 }
2122
Sreekanth Reddy5aeeb782015-07-15 10:19:56 +05302123 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
2124 (!memap_sz || !pio_sz); i++) {
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302125 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2126 if (pio_sz)
2127 continue;
2128 pio_chip = (u64)pci_resource_start(pdev, i);
2129 pio_sz = pci_resource_len(pdev, i);
2130 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
2131 if (memap_sz)
2132 continue;
2133 ioc->chip_phys = pci_resource_start(pdev, i);
2134 chip_phys = (u64)ioc->chip_phys;
2135 memap_sz = pci_resource_len(pdev, i);
2136 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302137 }
2138 }
2139
Sreekanth Reddy5aeeb782015-07-15 10:19:56 +05302140 if (ioc->chip == NULL) {
 2141		pr_err(MPT3SAS_FMT "unable to map adapter memory "
 2142			"or resource not found\n", ioc->name);
2143 r = -EINVAL;
2144 goto out_fail;
2145 }
2146
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302147 _base_mask_interrupts(ioc);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05302148
2149 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
2150 if (r)
2151 goto out_fail;
2152
2153 if (!ioc->rdpq_array_enable_assigned) {
2154 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
2155 ioc->rdpq_array_enable_assigned = 1;
2156 }
2157
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302158 r = _base_enable_msix(ioc);
2159 if (r)
2160 goto out_fail;
2161
Sreekanth Reddyfb77bb52015-06-30 12:24:47 +05302162 /* Use the Combined reply queue feature only for SAS3 C0 & higher
2163 * revision HBAs and also only when reply queue count is greater than 8
2164 */
2165 if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
2166 /* Determine the Supplemental Reply Post Host Index Registers
 2167	 * Addresses. The Supplemental Reply Post Host Index Registers
 2168	 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and
 2169	 * each register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET
 2170	 * bytes after the previous one.
2171 */
2172 ioc->replyPostRegisterIndex = kcalloc(
2173 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
2174 sizeof(resource_size_t *), GFP_KERNEL);
2175 if (!ioc->replyPostRegisterIndex) {
2176 dfailprintk(ioc, printk(MPT3SAS_FMT
2177 "allocation for reply Post Register Index failed!!!\n",
2178 ioc->name));
2179 r = -ENOMEM;
2180 goto out_fail;
2181 }
2182
2183 for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
2184 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
2185 ((u8 *)&ioc->chip->Doorbell +
2186 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2187 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
2188 }
2189 } else
2190 ioc->msix96_vector = 0;
2191
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302192 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
2193 pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
2194 reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
2195 "IO-APIC enabled"), reply_q->vector);
2196
2197 pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
2198 ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
2199 pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
2200 ioc->name, (unsigned long long)pio_chip, pio_sz);
2201
2202 /* Save PCI configuration state for recovery from PCI AER/EEH errors */
2203 pci_save_state(pdev);
2204 return 0;
2205
2206 out_fail:
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05302207 mpt3sas_base_unmap_resources(ioc);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302208 return r;
2209}
2210
2211/**
2212 * mpt3sas_base_get_msg_frame - obtain request mf pointer
2213 * @ioc: per adapter object
2214 * @smid: system request message index(smid zero is invalid)
2215 *
2216 * Returns virt pointer to message frame.
2217 */
2218void *
2219mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2220{
2221 return (void *)(ioc->request + (smid * ioc->request_sz));
2222}
2223
2224/**
2225 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
2226 * @ioc: per adapter object
2227 * @smid: system request message index
2228 *
2229 * Returns virt pointer to sense buffer.
2230 */
2231void *
2232mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2233{
2234 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
2235}
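/*
 * Sense buffers form one contiguous array of SCSI_SENSE_BUFFERSIZE byte
 * slots indexed by (smid - 1): smid 1 maps to offset 0, smid 2 to offset
 * SCSI_SENSE_BUFFERSIZE, and so on, for both the virtual address helper
 * above and the DMA address helper below.
 */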
2236
2237/**
2238 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
2239 * @ioc: per adapter object
2240 * @smid: system request message index
2241 *
2242 * Returns phys pointer to the low 32bit address of the sense buffer.
2243 */
2244__le32
2245mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2246{
2247 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
2248 SCSI_SENSE_BUFFERSIZE));
2249}
2250
2251/**
2252 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
2253 * @ioc: per adapter object
2254 * @phys_addr: lower 32 physical addr of the reply
2255 *
2256 * Converts 32bit lower physical addr into a virt address.
2257 */
2258void *
2259mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
2260{
2261 if (!phys_addr)
2262 return NULL;
2263 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
2264}
2265
Suganath prabu Subramani03d1fb32016-01-28 12:07:06 +05302266static inline u8
2267_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
2268{
2269 return ioc->cpu_msix_table[raw_smp_processor_id()];
2270}
2271
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302272/**
2273 * mpt3sas_base_get_smid - obtain a free smid from internal queue
2274 * @ioc: per adapter object
2275 * @cb_idx: callback index
2276 *
2277 * Returns smid (zero is invalid)
2278 */
2279u16
2280mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2281{
2282 unsigned long flags;
2283 struct request_tracker *request;
2284 u16 smid;
2285
2286 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2287 if (list_empty(&ioc->internal_free_list)) {
2288 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2289 pr_err(MPT3SAS_FMT "%s: smid not available\n",
2290 ioc->name, __func__);
2291 return 0;
2292 }
2293
2294 request = list_entry(ioc->internal_free_list.next,
2295 struct request_tracker, tracker_list);
2296 request->cb_idx = cb_idx;
2297 smid = request->smid;
2298 list_del(&request->tracker_list);
2299 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2300 return smid;
2301}
2302
2303/**
2304 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
2305 * @ioc: per adapter object
2306 * @cb_idx: callback index
2307 * @scmd: pointer to scsi command object
2308 *
2309 * Returns smid (zero is invalid)
2310 */
2311u16
2312mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
2313 struct scsi_cmnd *scmd)
2314{
2315 unsigned long flags;
2316 struct scsiio_tracker *request;
2317 u16 smid;
2318
2319 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2320 if (list_empty(&ioc->free_list)) {
2321 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2322 pr_err(MPT3SAS_FMT "%s: smid not available\n",
2323 ioc->name, __func__);
2324 return 0;
2325 }
2326
2327 request = list_entry(ioc->free_list.next,
2328 struct scsiio_tracker, tracker_list);
2329 request->scmd = scmd;
2330 request->cb_idx = cb_idx;
2331 smid = request->smid;
Suganath prabu Subramani03d1fb32016-01-28 12:07:06 +05302332 request->msix_io = _base_get_msix_index(ioc);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302333 list_del(&request->tracker_list);
2334 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2335 return smid;
2336}
2337
2338/**
2339 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
2340 * @ioc: per adapter object
2341 * @cb_idx: callback index
2342 *
2343 * Returns smid (zero is invalid)
2344 */
2345u16
2346mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2347{
2348 unsigned long flags;
2349 struct request_tracker *request;
2350 u16 smid;
2351
2352 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2353 if (list_empty(&ioc->hpr_free_list)) {
2354 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2355 return 0;
2356 }
2357
2358 request = list_entry(ioc->hpr_free_list.next,
2359 struct request_tracker, tracker_list);
2360 request->cb_idx = cb_idx;
2361 smid = request->smid;
2362 list_del(&request->tracker_list);
2363 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2364 return smid;
2365}
2366
2367/**
2368 * mpt3sas_base_free_smid - put smid back on free_list
2369 * @ioc: per adapter object
2370 * @smid: system request message index
2371 *
2372 * Return nothing.
2373 */
2374void
2375mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2376{
2377 unsigned long flags;
2378 int i;
2379 struct chain_tracker *chain_req, *next;
2380
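	/*
	 * smid ranges handled below: [1, hi_priority_smid) are SCSI IO
	 * trackers, [hi_priority_smid, internal_smid) are high-priority
	 * trackers, and [internal_smid, hba_queue_depth] are internal
	 * command trackers.
	 */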
2381 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2382 if (smid < ioc->hi_priority_smid) {
2383 /* scsiio queue */
2384 i = smid - 1;
2385 if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
2386 list_for_each_entry_safe(chain_req, next,
2387 &ioc->scsi_lookup[i].chain_list, tracker_list) {
2388 list_del_init(&chain_req->tracker_list);
2389 list_add(&chain_req->tracker_list,
2390 &ioc->free_chain_list);
2391 }
2392 }
2393 ioc->scsi_lookup[i].cb_idx = 0xFF;
2394 ioc->scsi_lookup[i].scmd = NULL;
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05302395 ioc->scsi_lookup[i].direct_io = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302396 list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
2397 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2398
2399 /*
2400 * See _wait_for_commands_to_complete() call with regards
2401 * to this code.
2402 */
2403 if (ioc->shost_recovery && ioc->pending_io_count) {
2404 if (ioc->pending_io_count == 1)
2405 wake_up(&ioc->reset_wq);
2406 ioc->pending_io_count--;
2407 }
2408 return;
2409 } else if (smid < ioc->internal_smid) {
2410 /* hi-priority */
2411 i = smid - ioc->hi_priority_smid;
2412 ioc->hpr_lookup[i].cb_idx = 0xFF;
2413 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
2414 } else if (smid <= ioc->hba_queue_depth) {
2415 /* internal queue */
2416 i = smid - ioc->internal_smid;
2417 ioc->internal_lookup[i].cb_idx = 0xFF;
2418 list_add(&ioc->internal_lookup[i].tracker_list,
2419 &ioc->internal_free_list);
2420 }
2421 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2422}
2423
2424/**
2425 * _base_writeq - 64 bit write to MMIO
2426 * @ioc: per adapter object
2427 * @b: data payload
2428 * @addr: address in MMIO space
2429 * @writeq_lock: spin lock
2430 *
2431 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
 2432 * care of 32 bit environments where it's not guaranteed to send the entire word
2433 * in one transfer.
2434 */
2435#if defined(writeq) && defined(CONFIG_64BIT)
2436static inline void
2437_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2438{
2439 writeq(cpu_to_le64(b), addr);
2440}
2441#else
2442static inline void
2443_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2444{
2445 unsigned long flags;
2446 __u64 data_out = cpu_to_le64(b);
2447
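	/*
	 * Both halves are written under writeq_lock so two CPUs posting
	 * descriptors cannot interleave their low/high dword writes.  The low
	 * dword goes first; the controller is assumed to act only once the
	 * high dword at addr + 4 lands (hardware behaviour assumed here, not
	 * something this file verifies).
	 */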
2448 spin_lock_irqsave(writeq_lock, flags);
2449 writel((u32)(data_out), addr);
2450 writel((u32)(data_out >> 32), (addr + 4));
2451 spin_unlock_irqrestore(writeq_lock, flags);
2452}
2453#endif
2454
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302455/**
2456 * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
2457 * @ioc: per adapter object
2458 * @smid: system request message index
2459 * @handle: device handle
2460 *
2461 * Return nothing.
2462 */
2463void
2464mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2465{
2466 Mpi2RequestDescriptorUnion_t descriptor;
2467 u64 *request = (u64 *)&descriptor;
2468
2469
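	/*
	 * The request descriptor is assembled in the 64-bit union and then
	 * posted to the RequestDescriptorPostLow register in one 64-bit
	 * write (or two lock-protected 32-bit writes) via _base_writeq().
	 */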
2470 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2471 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2472 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2473 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2474 descriptor.SCSIIO.LMID = 0;
2475 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2476 &ioc->scsi_lookup_lock);
2477}
2478
2479/**
2480 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
2481 * @ioc: per adapter object
2482 * @smid: system request message index
2483 * @handle: device handle
2484 *
2485 * Return nothing.
2486 */
2487void
2488mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2489 u16 handle)
2490{
2491 Mpi2RequestDescriptorUnion_t descriptor;
2492 u64 *request = (u64 *)&descriptor;
2493
2494 descriptor.SCSIIO.RequestFlags =
2495 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2496 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2497 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2498 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2499 descriptor.SCSIIO.LMID = 0;
2500 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2501 &ioc->scsi_lookup_lock);
2502}
2503
2504/**
 2505 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
2506 * @ioc: per adapter object
2507 * @smid: system request message index
Suganath prabu Subramani03d1fb32016-01-28 12:07:06 +05302508 * @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0.
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302509 * Return nothing.
2510 */
2511void
Suganath prabu Subramani03d1fb32016-01-28 12:07:06 +05302512mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2513 u16 msix_task)
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302514{
2515 Mpi2RequestDescriptorUnion_t descriptor;
2516 u64 *request = (u64 *)&descriptor;
2517
2518 descriptor.HighPriority.RequestFlags =
2519 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
Suganath prabu Subramani03d1fb32016-01-28 12:07:06 +05302520 descriptor.HighPriority.MSIxIndex = msix_task;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302521 descriptor.HighPriority.SMID = cpu_to_le16(smid);
2522 descriptor.HighPriority.LMID = 0;
2523 descriptor.HighPriority.Reserved1 = 0;
2524 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2525 &ioc->scsi_lookup_lock);
2526}
2527
2528/**
2529 * mpt3sas_base_put_smid_default - Default, primarily used for config pages
2530 * @ioc: per adapter object
2531 * @smid: system request message index
2532 *
2533 * Return nothing.
2534 */
2535void
2536mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2537{
2538 Mpi2RequestDescriptorUnion_t descriptor;
2539 u64 *request = (u64 *)&descriptor;
2540
2541 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2542 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
2543 descriptor.Default.SMID = cpu_to_le16(smid);
2544 descriptor.Default.LMID = 0;
2545 descriptor.Default.DescriptorTypeDependent = 0;
2546 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2547 &ioc->scsi_lookup_lock);
2548}
2549
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302550/**
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302551 * _base_display_OEMs_branding - Display branding string
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302552 * @ioc: per adapter object
2553 *
2554 * Return nothing.
2555 */
2556static void
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302557_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302558{
2561
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302562 switch (ioc->pdev->subsystem_vendor) {
2563 case PCI_VENDOR_ID_INTEL:
2564 switch (ioc->pdev->device) {
2565 case MPI2_MFGPAGE_DEVID_SAS2008:
2566 switch (ioc->pdev->subsystem_device) {
2567 case MPT2SAS_INTEL_RMS2LL080_SSDID:
2568 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2569 MPT2SAS_INTEL_RMS2LL080_BRANDING);
2570 break;
2571 case MPT2SAS_INTEL_RMS2LL040_SSDID:
2572 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2573 MPT2SAS_INTEL_RMS2LL040_BRANDING);
2574 break;
2575 case MPT2SAS_INTEL_SSD910_SSDID:
2576 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2577 MPT2SAS_INTEL_SSD910_BRANDING);
2578 break;
2579 default:
2580 pr_info(MPT3SAS_FMT
2581 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2582 ioc->name, ioc->pdev->subsystem_device);
2583 break;
 2584		}
		break;
2585 case MPI2_MFGPAGE_DEVID_SAS2308_2:
2586 switch (ioc->pdev->subsystem_device) {
2587 case MPT2SAS_INTEL_RS25GB008_SSDID:
2588 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2589 MPT2SAS_INTEL_RS25GB008_BRANDING);
2590 break;
2591 case MPT2SAS_INTEL_RMS25JB080_SSDID:
2592 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2593 MPT2SAS_INTEL_RMS25JB080_BRANDING);
2594 break;
2595 case MPT2SAS_INTEL_RMS25JB040_SSDID:
2596 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2597 MPT2SAS_INTEL_RMS25JB040_BRANDING);
2598 break;
2599 case MPT2SAS_INTEL_RMS25KB080_SSDID:
2600 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2601 MPT2SAS_INTEL_RMS25KB080_BRANDING);
2602 break;
2603 case MPT2SAS_INTEL_RMS25KB040_SSDID:
2604 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2605 MPT2SAS_INTEL_RMS25KB040_BRANDING);
2606 break;
2607 case MPT2SAS_INTEL_RMS25LB040_SSDID:
2608 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2609 MPT2SAS_INTEL_RMS25LB040_BRANDING);
2610 break;
2611 case MPT2SAS_INTEL_RMS25LB080_SSDID:
2612 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2613 MPT2SAS_INTEL_RMS25LB080_BRANDING);
2614 break;
2615 default:
2616 pr_info(MPT3SAS_FMT
2617 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2618 ioc->name, ioc->pdev->subsystem_device);
2619 break;
 2620		}
		break;
2621 case MPI25_MFGPAGE_DEVID_SAS3008:
2622 switch (ioc->pdev->subsystem_device) {
2623 case MPT3SAS_INTEL_RMS3JC080_SSDID:
2624 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2625 MPT3SAS_INTEL_RMS3JC080_BRANDING);
2626 break;
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302627
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302628 case MPT3SAS_INTEL_RS3GC008_SSDID:
2629 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2630 MPT3SAS_INTEL_RS3GC008_BRANDING);
2631 break;
2632 case MPT3SAS_INTEL_RS3FC044_SSDID:
2633 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2634 MPT3SAS_INTEL_RS3FC044_BRANDING);
2635 break;
2636 case MPT3SAS_INTEL_RS3UC080_SSDID:
2637 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2638 MPT3SAS_INTEL_RS3UC080_BRANDING);
2639 break;
2640 default:
2641 pr_info(MPT3SAS_FMT
2642 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2643 ioc->name, ioc->pdev->subsystem_device);
2644 break;
2645 }
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302646 break;
2647 default:
2648 pr_info(MPT3SAS_FMT
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302649 "Intel(R) Controller: Subsystem ID: 0x%X\n",
Sreekanth Reddyd8eb4a42015-06-30 12:25:02 +05302650 ioc->name, ioc->pdev->subsystem_device);
2651 break;
2652 }
2653 break;
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302654 case PCI_VENDOR_ID_DELL:
2655 switch (ioc->pdev->device) {
2656 case MPI2_MFGPAGE_DEVID_SAS2008:
2657 switch (ioc->pdev->subsystem_device) {
2658 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
2659 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2660 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
2661 break;
2662 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
2663 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2664 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
2665 break;
2666 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
2667 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2668 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
2669 break;
2670 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
2671 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2672 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
2673 break;
2674 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
2675 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2676 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
2677 break;
2678 case MPT2SAS_DELL_PERC_H200_SSDID:
2679 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2680 MPT2SAS_DELL_PERC_H200_BRANDING);
2681 break;
2682 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
2683 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2684 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
2685 break;
2686 default:
2687 pr_info(MPT3SAS_FMT
2688 "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
2689 ioc->name, ioc->pdev->subsystem_device);
2690 break;
2691 }
2692 break;
2693 case MPI25_MFGPAGE_DEVID_SAS3008:
2694 switch (ioc->pdev->subsystem_device) {
2695 case MPT3SAS_DELL_12G_HBA_SSDID:
2696 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2697 MPT3SAS_DELL_12G_HBA_BRANDING);
2698 break;
2699 default:
2700 pr_info(MPT3SAS_FMT
2701 "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
2702 ioc->name, ioc->pdev->subsystem_device);
2703 break;
2704 }
2705 break;
2706 default:
2707 pr_info(MPT3SAS_FMT
2708 "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
2709 ioc->pdev->subsystem_device);
2710 break;
2711 }
2712 break;
2713 case PCI_VENDOR_ID_CISCO:
2714 switch (ioc->pdev->device) {
2715 case MPI25_MFGPAGE_DEVID_SAS3008:
2716 switch (ioc->pdev->subsystem_device) {
2717 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
2718 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2719 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
2720 break;
2721 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
2722 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2723 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
2724 break;
2725 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2726 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2727 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2728 break;
2729 default:
2730 pr_info(MPT3SAS_FMT
2731 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2732 ioc->name, ioc->pdev->subsystem_device);
2733 break;
2734 }
2735 break;
2736 case MPI25_MFGPAGE_DEVID_SAS3108_1:
2737 switch (ioc->pdev->subsystem_device) {
2738 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2739 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2740 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2741 break;
2742 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
2743 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2744 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
2745 );
2746 break;
2747 default:
2748 pr_info(MPT3SAS_FMT
2749 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2750 ioc->name, ioc->pdev->subsystem_device);
2751 break;
2752 }
2753 break;
2754 default:
2755 pr_info(MPT3SAS_FMT
2756 "Cisco SAS HBA: Subsystem ID: 0x%X\n",
2757 ioc->name, ioc->pdev->subsystem_device);
2758 break;
2759 }
2760 break;
2761 case MPT2SAS_HP_3PAR_SSVID:
2762 switch (ioc->pdev->device) {
2763 case MPI2_MFGPAGE_DEVID_SAS2004:
2764 switch (ioc->pdev->subsystem_device) {
2765 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2766 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2767 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2768 break;
2769 default:
2770 pr_info(MPT3SAS_FMT
2771 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2772 ioc->name, ioc->pdev->subsystem_device);
2773 break;
 2774		}
		break;
2775 case MPI2_MFGPAGE_DEVID_SAS2308_2:
2776 switch (ioc->pdev->subsystem_device) {
2777 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2778 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2779 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2780 break;
2781 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2782 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2783 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2784 break;
2785 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2786 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2787 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2788 break;
2789 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2790 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2791 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2792 break;
2793 default:
2794 pr_info(MPT3SAS_FMT
2795 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2796 ioc->name, ioc->pdev->subsystem_device);
2797 break;
 2798		}
		break;
2799 default:
2800 pr_info(MPT3SAS_FMT
2801 "HP SAS HBA: Subsystem ID: 0x%X\n",
2802 ioc->name, ioc->pdev->subsystem_device);
2803 break;
2804 }
Sreekanth Reddy38e41412015-06-30 12:24:57 +05302805 default:
Sreekanth Reddy38e41412015-06-30 12:24:57 +05302806 break;
2807 }
2808}
Sreekanth Reddyfb84dfc2015-06-30 12:24:56 +05302809
2810/**
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302811 * _base_display_ioc_capabilities - Display IOC's capabilities.
2812 * @ioc: per adapter object
2813 *
2814 * Return nothing.
2815 */
2816static void
2817_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2818{
2819 int i = 0;
2820 char desc[16];
2821 u32 iounit_pg1_flags;
2822 u32 bios_version;
2823
2824 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2825 strncpy(desc, ioc->manu_pg0.ChipName, 16);
2826 pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
2827 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2828 ioc->name, desc,
2829 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2830 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2831 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2832 ioc->facts.FWVersion.Word & 0x000000FF,
2833 ioc->pdev->revision,
2834 (bios_version & 0xFF000000) >> 24,
2835 (bios_version & 0x00FF0000) >> 16,
2836 (bios_version & 0x0000FF00) >> 8,
2837 bios_version & 0x000000FF);
2838
Sreekanth Reddy989e43c2015-11-11 17:30:32 +05302839 _base_display_OEMs_branding(ioc);
Sreekanth Reddy1117b312014-09-12 15:35:30 +05302840
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302841 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2842
2843 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2844 pr_info("Initiator");
2845 i++;
2846 }
2847
2848 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2849 pr_info("%sTarget", i ? "," : "");
2850 i++;
2851 }
2852
2853 i = 0;
2854 pr_info("), ");
2855 pr_info("Capabilities=(");
2856
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05302857 if (!ioc->hide_ir_msg) {
2858 if (ioc->facts.IOCCapabilities &
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302859 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2860 pr_info("Raid");
2861 i++;
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05302862 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05302863 }
2864
2865 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2866 pr_info("%sTLR", i ? "," : "");
2867 i++;
2868 }
2869
2870 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2871 pr_info("%sMulticast", i ? "," : "");
2872 i++;
2873 }
2874
2875 if (ioc->facts.IOCCapabilities &
2876 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2877 pr_info("%sBIDI Target", i ? "," : "");
2878 i++;
2879 }
2880
2881 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2882 pr_info("%sEEDP", i ? "," : "");
2883 i++;
2884 }
2885
2886 if (ioc->facts.IOCCapabilities &
2887 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2888 pr_info("%sSnapshot Buffer", i ? "," : "");
2889 i++;
2890 }
2891
2892 if (ioc->facts.IOCCapabilities &
2893 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2894 pr_info("%sDiag Trace Buffer", i ? "," : "");
2895 i++;
2896 }
2897
2898 if (ioc->facts.IOCCapabilities &
2899 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2900 pr_info("%sDiag Extended Buffer", i ? "," : "");
2901 i++;
2902 }
2903
2904 if (ioc->facts.IOCCapabilities &
2905 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2906 pr_info("%sTask Set Full", i ? "," : "");
2907 i++;
2908 }
2909
2910 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2911 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2912 pr_info("%sNCQ", i ? "," : "");
2913 i++;
2914 }
2915
2916 pr_info(")\n");
2917}
2918
2919/**
2920 * mpt3sas_base_update_missing_delay - change the missing delay timers
2921 * @ioc: per adapter object
2922 * @device_missing_delay: amount of time till device is reported missing
2923 * @io_missing_delay: interval IO is returned when there is a missing device
2924 *
2925 * Return nothing.
2926 *
2927 * Passed on the command line, this function will modify the device missing
2928 * delay, as well as the io missing delay. This should be called at driver
2929 * load time.
2930 */
2931void
2932mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
2933 u16 device_missing_delay, u8 io_missing_delay)
2934{
 2935	u16 dmd, dmd_new, dmd_original;
2936 u8 io_missing_delay_original;
2937 u16 sz;
2938 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2939 Mpi2ConfigReply_t mpi_reply;
2940 u8 num_phys = 0;
2941 u16 ioc_status;
2942
2943 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
2944 if (!num_phys)
2945 return;
2946
2947 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2948 sizeof(Mpi2SasIOUnit1PhyData_t));
2949 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2950 if (!sas_iounit_pg1) {
2951 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2952 ioc->name, __FILE__, __LINE__, __func__);
2953 goto out;
2954 }
2955 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2956 sas_iounit_pg1, sz))) {
2957 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2958 ioc->name, __FILE__, __LINE__, __func__);
2959 goto out;
2960 }
2961 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2962 MPI2_IOCSTATUS_MASK;
2963 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2964 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2965 ioc->name, __FILE__, __LINE__, __func__);
2966 goto out;
2967 }
2968
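	/*
	 * The delay is stored either in 1-second units (0..0x7F) or, when
	 * MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 is set, in 16-second units.
	 * Worked example: a requested delay of 300 seconds exceeds 0x7F, so
	 * it is stored as 300 / 16 = 18 units with the UNIT_16 flag, i.e. an
	 * effective delay of 288 seconds.
	 */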
2969 /* device missing delay */
2970 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2971 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2972 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2973 else
2974 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
 2975	dmd_original = dmd;
2976 if (device_missing_delay > 0x7F) {
2977 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2978 device_missing_delay;
2979 dmd = dmd / 16;
2980 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2981 } else
2982 dmd = device_missing_delay;
2983 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2984
2985 /* io missing delay */
2986 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2987 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2988
2989 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2990 sz)) {
2991 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2992 dmd_new = (dmd &
2993 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2994 else
2995 dmd_new =
2996 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2997 pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
 2998		    ioc->name, dmd_original, dmd_new);
2999 pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
3000 ioc->name, io_missing_delay_original,
3001 io_missing_delay);
3002 ioc->device_missing_delay = dmd_new;
3003 ioc->io_missing_delay = io_missing_delay;
3004 }
3005
3006out:
3007 kfree(sas_iounit_pg1);
3008}
3009/**
3010 * _base_static_config_pages - static start of day config pages
3011 * @ioc: per adapter object
3012 *
3013 * Return nothing.
3014 */
3015static void
3016_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
3017{
3018 Mpi2ConfigReply_t mpi_reply;
3019 u32 iounit_pg1_flags;
3020
3021 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
3022 if (ioc->ir_firmware)
3023 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
3024 &ioc->manu_pg10);
3025
3026 /*
3027 * Ensure correct T10 PI operation if vendor left EEDPTagMode
3028 * flag unset in NVDATA.
3029 */
3030 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
3031 if (ioc->manu_pg11.EEDPTagMode == 0) {
3032 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
3033 ioc->name);
3034 ioc->manu_pg11.EEDPTagMode &= ~0x3;
3035 ioc->manu_pg11.EEDPTagMode |= 0x1;
3036 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
3037 &ioc->manu_pg11);
3038 }
3039
3040 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
3041 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
3042 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
3043 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
3044 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
Sreekanth Reddy2d8ce8c2015-01-12 11:38:56 +05303045 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303046 _base_display_ioc_capabilities(ioc);
3047
3048 /*
3049 * Enable task_set_full handling in iounit_pg1 when the
3050 * facts capabilities indicate that its supported.
3051 */
3052 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
3053 if ((ioc->facts.IOCCapabilities &
3054 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
3055 iounit_pg1_flags &=
3056 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
3057 else
3058 iounit_pg1_flags |=
3059 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
3060 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
3061 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
Sreekanth Reddy2d8ce8c2015-01-12 11:38:56 +05303062
3063 if (ioc->iounit_pg8.NumSensors)
3064 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303065}
3066
3067/**
3068 * _base_release_memory_pools - release memory
3069 * @ioc: per adapter object
3070 *
3071 * Free memory allocated from _base_allocate_memory_pools.
3072 *
3073 * Return nothing.
3074 */
3075static void
3076_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3077{
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05303078 int i = 0;
3079 struct reply_post_struct *rps;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303080
3081 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3082 __func__));
3083
3084 if (ioc->request) {
3085 pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
3086 ioc->request, ioc->request_dma);
3087 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3088 "request_pool(0x%p): free\n",
3089 ioc->name, ioc->request));
3090 ioc->request = NULL;
3091 }
3092
3093 if (ioc->sense) {
3094 pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
3095 if (ioc->sense_dma_pool)
3096 pci_pool_destroy(ioc->sense_dma_pool);
3097 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3098 "sense_pool(0x%p): free\n",
3099 ioc->name, ioc->sense));
3100 ioc->sense = NULL;
3101 }
3102
3103 if (ioc->reply) {
3104 pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
3105 if (ioc->reply_dma_pool)
3106 pci_pool_destroy(ioc->reply_dma_pool);
3107 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3108 "reply_pool(0x%p): free\n",
3109 ioc->name, ioc->reply));
3110 ioc->reply = NULL;
3111 }
3112
3113 if (ioc->reply_free) {
3114 pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
3115 ioc->reply_free_dma);
3116 if (ioc->reply_free_dma_pool)
3117 pci_pool_destroy(ioc->reply_free_dma_pool);
3118 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3119 "reply_free_pool(0x%p): free\n",
3120 ioc->name, ioc->reply_free));
3121 ioc->reply_free = NULL;
3122 }
3123
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05303124 if (ioc->reply_post) {
3125 do {
3126 rps = &ioc->reply_post[i];
3127 if (rps->reply_post_free) {
3128 pci_pool_free(
3129 ioc->reply_post_free_dma_pool,
3130 rps->reply_post_free,
3131 rps->reply_post_free_dma);
3132 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3133 "reply_post_free_pool(0x%p): free\n",
3134 ioc->name, rps->reply_post_free));
3135 rps->reply_post_free = NULL;
3136 }
3137 } while (ioc->rdpq_array_enable &&
3138 (++i < ioc->reply_queue_count));
3139
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303140 if (ioc->reply_post_free_dma_pool)
3141 pci_pool_destroy(ioc->reply_post_free_dma_pool);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05303142 kfree(ioc->reply_post);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303143 }
3144
3145 if (ioc->config_page) {
3146 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3147 "config_page(0x%p): free\n", ioc->name,
3148 ioc->config_page));
3149 pci_free_consistent(ioc->pdev, ioc->config_page_sz,
3150 ioc->config_page, ioc->config_page_dma);
3151 }
3152
3153 if (ioc->scsi_lookup) {
3154 free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
3155 ioc->scsi_lookup = NULL;
3156 }
3157 kfree(ioc->hpr_lookup);
3158 kfree(ioc->internal_lookup);
3159 if (ioc->chain_lookup) {
3160 for (i = 0; i < ioc->chain_depth; i++) {
3161 if (ioc->chain_lookup[i].chain_buffer)
3162 pci_pool_free(ioc->chain_dma_pool,
3163 ioc->chain_lookup[i].chain_buffer,
3164 ioc->chain_lookup[i].chain_buffer_dma);
3165 }
3166 if (ioc->chain_dma_pool)
3167 pci_pool_destroy(ioc->chain_dma_pool);
3168 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
3169 ioc->chain_lookup = NULL;
3170 }
3171}
3172
3173/**
3174 * _base_allocate_memory_pools - allocate start of day memory pools
3175 * @ioc: per adapter object
3176 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3177 *
3178 * Returns 0 success, anything else error
3179 */
3180static int
3181_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3182{
3183 struct mpt3sas_facts *facts;
3184 u16 max_sge_elements;
3185 u16 chains_needed_per_io;
3186 u32 sz, total_sz, reply_post_free_sz;
3187 u32 retry_sz;
3188 u16 max_request_credit;
3189 unsigned short sg_tablesize;
3190 u16 sge_size;
3191 int i;
3192
3193 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3194 __func__));
3195
3196
3197 retry_sz = 0;
3198 facts = &ioc->facts;
3199
3200 /* command line tunables for max sgl entries */
3201 if (max_sgl_entries != -1)
3202 sg_tablesize = max_sgl_entries;
Sreekanth Reddy471ef9d2015-11-11 17:30:24 +05303203 else {
3204 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
3205 sg_tablesize = MPT2SAS_SG_DEPTH;
3206 else
3207 sg_tablesize = MPT3SAS_SG_DEPTH;
3208 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303209
Sreekanth Reddy8a7e4c22015-11-11 17:30:18 +05303210 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
3211 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
3212 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
Sreekanth Reddyad666a02015-01-12 11:39:00 +05303213 sg_tablesize = min_t(unsigned short, sg_tablesize,
Ming Lin65e86172016-04-04 14:48:10 -07003214 SG_MAX_SEGMENTS);
Sreekanth Reddyad666a02015-01-12 11:39:00 +05303215 pr_warn(MPT3SAS_FMT
3216 "sg_tablesize(%u) is bigger than kernel"
Ming Lin65e86172016-04-04 14:48:10 -07003217 " defined SG_CHUNK_SIZE(%u)\n", ioc->name,
Sreekanth Reddy8a7e4c22015-11-11 17:30:18 +05303218 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
Sreekanth Reddyad666a02015-01-12 11:39:00 +05303219 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303220 ioc->shost->sg_tablesize = sg_tablesize;
3221
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303222 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
3223 (facts->RequestCredit / 4));
3224 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
3225 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
3226 INTERNAL_SCSIIO_CMDS_COUNT)) {
 3227			pr_err(MPT3SAS_FMT "IOC doesn't have enough Request "
 3228			    "Credits, it has just %d credits\n",
3229 ioc->name, facts->RequestCredit);
3230 return -ENOMEM;
3231 }
3232 ioc->internal_depth = 10;
3233 }
3234
3235 ioc->hi_priority_depth = ioc->internal_depth - (5);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303236 /* command line tunables for max controller queue depth */
3237 if (max_queue_depth != -1 && max_queue_depth != 0) {
3238 max_request_credit = min_t(u16, max_queue_depth +
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303239 ioc->internal_depth, facts->RequestCredit);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303240 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
3241 max_request_credit = MAX_HBA_QUEUE_DEPTH;
3242 } else
3243 max_request_credit = min_t(u16, facts->RequestCredit,
3244 MAX_HBA_QUEUE_DEPTH);
3245
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303246 /* Firmware maintains additional facts->HighPriorityCredit number of
3247 * credits for HiPriprity Request messages, so hba queue depth will be
3248 * sum of max_request_credit and high priority queue depth.
3249 */
3250 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303251
3252 /* request frame size */
3253 ioc->request_sz = facts->IOCRequestFrameSize * 4;
3254
3255 /* reply frame size */
3256 ioc->reply_sz = facts->ReplyFrameSize * 4;
3257
Suganath prabu Subramaniebb30242016-01-28 12:07:04 +05303258 /* chain segment size */
3259 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3260 if (facts->IOCMaxChainSegmentSize)
3261 ioc->chain_segment_sz =
3262 facts->IOCMaxChainSegmentSize *
3263 MAX_CHAIN_ELEMT_SZ;
3264 else
3265 /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
3266 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
3267 MAX_CHAIN_ELEMT_SZ;
3268 } else
3269 ioc->chain_segment_sz = ioc->request_sz;
3270
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303271 /* calculate the max scatter element size */
3272 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
3273
3274 retry_allocation:
3275 total_sz = 0;
3276 /* calculate number of sg elements left over in the 1st frame */
3277 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
3278 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
3279 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
3280
3281 /* now do the same for a chain buffer */
Suganath prabu Subramaniebb30242016-01-28 12:07:04 +05303282 max_sge_elements = ioc->chain_segment_sz - sge_size;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303283 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
3284
3285 /*
3286 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
3287 */
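	/*
	 * Illustrative example (actual values depend on the negotiated frame
	 * and SGE sizes): with sg_tablesize = 128, 8 SGEs fitting in the main
	 * message and 38 per chain segment, (128 - 8) / 38 + 1 = 4 chain
	 * buffers are needed per IO.
	 */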
3288 chains_needed_per_io = ((ioc->shost->sg_tablesize -
3289 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
3290 + 1;
3291 if (chains_needed_per_io > facts->MaxChainDepth) {
3292 chains_needed_per_io = facts->MaxChainDepth;
3293 ioc->shost->sg_tablesize = min_t(u16,
3294 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
3295 * chains_needed_per_io), ioc->shost->sg_tablesize);
3296 }
3297 ioc->chains_needed_per_io = chains_needed_per_io;
3298
3299 /* reply free queue sizing - taking into account for 64 FW events */
3300 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3301
3302 /* calculate reply descriptor post queue depth */
3303 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
3304 ioc->reply_free_queue_depth + 1 ;
3305 /* align the reply post queue on the next 16 count boundary */
3306 if (ioc->reply_post_queue_depth % 16)
3307 ioc->reply_post_queue_depth += 16 -
3308 (ioc->reply_post_queue_depth % 16);
3309
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303310 if (ioc->reply_post_queue_depth >
3311 facts->MaxReplyDescriptorPostQueueDepth) {
3312 ioc->reply_post_queue_depth =
3313 facts->MaxReplyDescriptorPostQueueDepth -
3314 (facts->MaxReplyDescriptorPostQueueDepth % 16);
3315 ioc->hba_queue_depth =
3316 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
3317 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3318 }
3319
3320 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
3321 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
3322 "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
3323 ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
3324 ioc->chains_needed_per_io));
3325
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05303326 /* reply post queue, 16 byte align */
3327 reply_post_free_sz = ioc->reply_post_queue_depth *
3328 sizeof(Mpi2DefaultReplyDescriptor_t);
3329
3330 sz = reply_post_free_sz;
3331 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
3332 sz *= ioc->reply_queue_count;
3333
3334 ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
3335 (ioc->reply_queue_count):1,
3336 sizeof(struct reply_post_struct), GFP_KERNEL);
3337
3338 if (!ioc->reply_post) {
3339 pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
3340 ioc->name);
3341 goto out;
3342 }
3343 ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
3344 ioc->pdev, sz, 16, 0);
3345 if (!ioc->reply_post_free_dma_pool) {
3346 pr_err(MPT3SAS_FMT
3347 "reply_post_free pool: pci_pool_create failed\n",
3348 ioc->name);
3349 goto out;
3350 }
3351 i = 0;
3352 do {
3353 ioc->reply_post[i].reply_post_free =
3354 pci_pool_alloc(ioc->reply_post_free_dma_pool,
3355 GFP_KERNEL,
3356 &ioc->reply_post[i].reply_post_free_dma);
3357 if (!ioc->reply_post[i].reply_post_free) {
3358 pr_err(MPT3SAS_FMT
3359 "reply_post_free pool: pci_pool_alloc failed\n",
3360 ioc->name);
3361 goto out;
3362 }
3363 memset(ioc->reply_post[i].reply_post_free, 0, sz);
3364 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3365 "reply post free pool (0x%p): depth(%d),"
3366 "element_size(%d), pool_size(%d kB)\n", ioc->name,
3367 ioc->reply_post[i].reply_post_free,
3368 ioc->reply_post_queue_depth, 8, sz/1024));
3369 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3370 "reply_post_free_dma = (0x%llx)\n", ioc->name,
3371 (unsigned long long)
3372 ioc->reply_post[i].reply_post_free_dma));
3373 total_sz += sz;
3374 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
3375
3376 if (ioc->dma_mask == 64) {
3377 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
3378 pr_warn(MPT3SAS_FMT
3379 "no suitable consistent DMA mask for %s\n",
3380 ioc->name, pci_name(ioc->pdev));
3381 goto out;
3382 }
3383 }
3384
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303385 ioc->scsiio_depth = ioc->hba_queue_depth -
3386 ioc->hi_priority_depth - ioc->internal_depth;
3387
3388 /* set the scsi host can_queue depth
3389 	 * leaving room for internal commands that could be outstanding
3390 */
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303391 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303392 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3393 "scsi host: can_queue depth (%d)\n",
3394 ioc->name, ioc->shost->can_queue));
3395
3396
3397 	/* contiguous pool for request and chains, 16 byte align, one extra
3398 	 * frame for smid=0
3399 	 */
3400 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
3401 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
3402
3403 /* hi-priority queue */
3404 sz += (ioc->hi_priority_depth * ioc->request_sz);
3405
3406 /* internal queue */
3407 sz += (ioc->internal_depth * ioc->request_sz);
3408
3409 ioc->request_dma_sz = sz;
3410 ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
3411 if (!ioc->request) {
3412 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3413 "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3414 "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
3415 ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3416 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
3417 goto out;
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05303418 retry_sz = 64;
3419 ioc->hba_queue_depth -= retry_sz;
Suganath prabu Subramani8ff045c2016-02-18 14:09:45 +05303420 _base_release_memory_pools(ioc);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303421 goto retry_allocation;
3422 }
3423
3424 if (retry_sz)
3425 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3426 		    "succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3427 "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
3428 ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3429
3430 /* hi-priority queue */
3431 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
3432 ioc->request_sz);
3433 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
3434 ioc->request_sz);
3435
3436 /* internal queue */
3437 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
3438 ioc->request_sz);
3439 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
3440 ioc->request_sz);
3441
3442 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3443 "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3444 ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
3445 (ioc->hba_queue_depth * ioc->request_sz)/1024));
3446
3447 dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
3448 ioc->name, (unsigned long long) ioc->request_dma));
3449 total_sz += sz;
3450
3451 sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
3452 ioc->scsi_lookup_pages = get_order(sz);
3453 ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
3454 GFP_KERNEL, ioc->scsi_lookup_pages);
3455 if (!ioc->scsi_lookup) {
3456 pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
3457 ioc->name, (int)sz);
3458 goto out;
3459 }
3460
3461 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
3462 ioc->name, ioc->request, ioc->scsiio_depth));
3463
3464 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
3465 sz = ioc->chain_depth * sizeof(struct chain_tracker);
3466 ioc->chain_pages = get_order(sz);
3467 ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
3468 GFP_KERNEL, ioc->chain_pages);
3469 if (!ioc->chain_lookup) {
3470 pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
3471 ioc->name);
3472 goto out;
3473 }
3474 ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
Suganath prabu Subramaniebb30242016-01-28 12:07:04 +05303475 ioc->chain_segment_sz, 16, 0);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303476 if (!ioc->chain_dma_pool) {
3477 pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
3478 ioc->name);
3479 goto out;
3480 }
3481 for (i = 0; i < ioc->chain_depth; i++) {
3482 ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
3483 ioc->chain_dma_pool , GFP_KERNEL,
3484 &ioc->chain_lookup[i].chain_buffer_dma);
3485 if (!ioc->chain_lookup[i].chain_buffer) {
3486 ioc->chain_depth = i;
3487 goto chain_done;
3488 }
Suganath prabu Subramaniebb30242016-01-28 12:07:04 +05303489 total_sz += ioc->chain_segment_sz;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303490 }
3491 chain_done:
3492 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3493 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
Suganath prabu Subramaniebb30242016-01-28 12:07:04 +05303494 ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
3495 ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303496
3497 /* initialize hi-priority queue smid's */
3498 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
3499 sizeof(struct request_tracker), GFP_KERNEL);
3500 if (!ioc->hpr_lookup) {
3501 pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
3502 ioc->name);
3503 goto out;
3504 }
3505 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
3506 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3507 "hi_priority(0x%p): depth(%d), start smid(%d)\n",
3508 ioc->name, ioc->hi_priority,
3509 ioc->hi_priority_depth, ioc->hi_priority_smid));
3510
3511 /* initialize internal queue smid's */
3512 ioc->internal_lookup = kcalloc(ioc->internal_depth,
3513 sizeof(struct request_tracker), GFP_KERNEL);
3514 if (!ioc->internal_lookup) {
3515 pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
3516 ioc->name);
3517 goto out;
3518 }
3519 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
3520 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3521 "internal(0x%p): depth(%d), start smid(%d)\n",
3522 ioc->name, ioc->internal,
3523 ioc->internal_depth, ioc->internal_smid));
3524
3525 /* sense buffers, 4 byte align */
3526 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
3527 ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
3528 0);
3529 if (!ioc->sense_dma_pool) {
3530 pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
3531 ioc->name);
3532 goto out;
3533 }
3534 ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
3535 &ioc->sense_dma);
3536 if (!ioc->sense) {
3537 pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
3538 ioc->name);
3539 goto out;
3540 }
3541 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3542 "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
3543 "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
3544 SCSI_SENSE_BUFFERSIZE, sz/1024));
3545 dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
3546 ioc->name, (unsigned long long)ioc->sense_dma));
3547 total_sz += sz;
3548
3549 /* reply pool, 4 byte align */
3550 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
3551 ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
3552 0);
3553 if (!ioc->reply_dma_pool) {
3554 pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
3555 ioc->name);
3556 goto out;
3557 }
3558 ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
3559 &ioc->reply_dma);
3560 if (!ioc->reply) {
3561 pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
3562 ioc->name);
3563 goto out;
3564 }
3565 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
3566 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
3567 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3568 "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3569 ioc->name, ioc->reply,
3570 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
3571 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
3572 ioc->name, (unsigned long long)ioc->reply_dma));
3573 total_sz += sz;
3574
3575 /* reply free queue, 16 byte align */
3576 sz = ioc->reply_free_queue_depth * 4;
3577 ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
3578 ioc->pdev, sz, 16, 0);
3579 if (!ioc->reply_free_dma_pool) {
3580 pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
3581 ioc->name);
3582 goto out;
3583 }
3584 ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
3585 &ioc->reply_free_dma);
3586 if (!ioc->reply_free) {
3587 pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
3588 ioc->name);
3589 goto out;
3590 }
3591 memset(ioc->reply_free, 0, sz);
3592 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
3593 "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
3594 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
3595 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3596 "reply_free_dma (0x%llx)\n",
3597 ioc->name, (unsigned long long)ioc->reply_free_dma));
3598 total_sz += sz;
3599
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303600 ioc->config_page_sz = 512;
3601 ioc->config_page = pci_alloc_consistent(ioc->pdev,
3602 ioc->config_page_sz, &ioc->config_page_dma);
3603 if (!ioc->config_page) {
3604 pr_err(MPT3SAS_FMT
3605 			"config page: pci_alloc_consistent failed\n",
3606 ioc->name);
3607 goto out;
3608 }
3609 dinitprintk(ioc, pr_info(MPT3SAS_FMT
3610 "config page(0x%p): size(%d)\n",
3611 ioc->name, ioc->config_page, ioc->config_page_sz));
3612 dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
3613 ioc->name, (unsigned long long)ioc->config_page_dma));
3614 total_sz += ioc->config_page_sz;
3615
3616 pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
3617 ioc->name, total_sz/1024);
3618 pr_info(MPT3SAS_FMT
3619 	    "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
3620 ioc->name, ioc->shost->can_queue, facts->RequestCredit);
3621 pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
3622 ioc->name, ioc->shost->sg_tablesize);
3623 return 0;
3624
3625 out:
3626 return -ENOMEM;
3627}
3628
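/*
 * Editor's note: a minimal standalone sketch (not part of the driver) of the
 * queue sizing arithmetic performed in the allocation routine above.  The
 * request_credit input and the resulting numbers are illustrative
 * assumptions; the constants mirror the hard-coded values used above
 * (internal depth of 10, 64 reserved reply free entries, 16-entry alignment
 * of the reply descriptor post queue).
 */
static unsigned int example_reply_post_queue_depth(unsigned int request_credit)
{
	unsigned int internal_depth = 10;		/* assumed, as above */
	unsigned int hi_priority_depth = internal_depth - 5;
	unsigned int hba_queue_depth = request_credit + hi_priority_depth;
	unsigned int reply_free_queue_depth = hba_queue_depth + 64;
	unsigned int reply_post_queue_depth = hba_queue_depth +
	    reply_free_queue_depth + 1;

	/* round up to the next multiple of 16, as the code above does */
	if (reply_post_queue_depth % 16)
		reply_post_queue_depth += 16 - (reply_post_queue_depth % 16);

	/* e.g. request_credit = 1000 gives 2075, rounded up to 2080 */
	return reply_post_queue_depth;
}
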
3629/**
3630 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
3631 * @ioc: Pointer to MPT3SAS_ADAPTER structure
3632 * @cooked: Request raw or cooked IOC state
3633 *
3634 * Returns all IOC Doorbell register bits if cooked==0, else just the
3635 * Doorbell bits in MPI2_IOC_STATE_MASK.
3636 */
3637u32
3638mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
3639{
3640 u32 s, sc;
3641
3642 s = readl(&ioc->chip->Doorbell);
3643 sc = s & MPI2_IOC_STATE_MASK;
3644 return cooked ? sc : s;
3645}
3646
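/*
 * Editor's note: an illustrative sketch (not part of the driver) of how the
 * helper above is typically used.  With cooked == 0 the full Doorbell
 * register is returned (needed for the fault code bits), with cooked == 1
 * only the state bits remain, suitable for comparing directly against the
 * MPI2_IOC_STATE_* values.
 */
static void example_check_ioc_state(struct MPT3SAS_ADAPTER *ioc)
{
	u32 raw = mpt3sas_base_get_iocstate(ioc, 0);	/* all doorbell bits */
	u32 state = mpt3sas_base_get_iocstate(ioc, 1);	/* state bits only */

	if (state == MPI2_IOC_STATE_FAULT)
		mpt3sas_base_fault_info(ioc, raw & MPI2_DOORBELL_DATA_MASK);
}
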
3647/**
3648 * _base_wait_on_iocstate - waiting on a particular ioc state
3649 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
3650 * @timeout: timeout in seconds
3651 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3652 *
3653 * Returns 0 for success, non-zero for failure.
3654 */
3655static int
3656_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
3657 int sleep_flag)
3658{
3659 u32 count, cntdn;
3660 u32 current_state;
3661
3662 count = 0;
3663 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3664 do {
3665 current_state = mpt3sas_base_get_iocstate(ioc, 1);
3666 if (current_state == ioc_state)
3667 return 0;
3668 if (count && current_state == MPI2_IOC_STATE_FAULT)
3669 break;
3670 if (sleep_flag == CAN_SLEEP)
3671 usleep_range(1000, 1500);
3672 else
3673 udelay(500);
3674 count++;
3675 } while (--cntdn);
3676
3677 return current_state;
3678}
3679
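/*
 * Editor's note: illustrative sketch (not part of the driver).  The wait
 * helpers above and below size their countdown so the polling budget
 * roughly matches the requested timeout: about 1000 sleeps of 1-1.5 ms per
 * second when sleeping is allowed, or 2000 busy-waits of 500 us per second
 * otherwise.
 */
static u32 example_poll_budget(int timeout_seconds, int can_sleep)
{
	return can_sleep ? 1000 * timeout_seconds : 2000 * timeout_seconds;
}
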
3680/**
3681 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
3682 * a write to the doorbell)
3683 * @ioc: per adapter object
3684 * @timeout: timeout in seconds
3685 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3686 *
3687 * Returns 0 for success, non-zero for failure.
3688 *
3689 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3690 */
3691static int
Sreekanth Reddy4dc8c802015-06-30 12:24:48 +05303692_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
3693
3694static int
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05303695_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3696 int sleep_flag)
3697{
3698 u32 cntdn, count;
3699 u32 int_status;
3700
3701 count = 0;
3702 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3703 do {
3704 int_status = readl(&ioc->chip->HostInterruptStatus);
3705 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3706 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3707 "%s: successful count(%d), timeout(%d)\n",
3708 ioc->name, __func__, count, timeout));
3709 return 0;
3710 }
3711 if (sleep_flag == CAN_SLEEP)
3712 usleep_range(1000, 1500);
3713 else
3714 udelay(500);
3715 count++;
3716 } while (--cntdn);
3717
3718 pr_err(MPT3SAS_FMT
3719 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3720 ioc->name, __func__, count, int_status);
3721 return -EFAULT;
3722}
3723
3724/**
3725 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3726 * @ioc: per adapter object
3727 * @timeout: timeout in seconds
3728 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3729 *
3730 * Returns 0 for success, non-zero for failure.
3731 *
3732 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3733 * doorbell.
3734 */
3735static int
3736_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
3737 int sleep_flag)
3738{
3739 u32 cntdn, count;
3740 u32 int_status;
3741 u32 doorbell;
3742
3743 count = 0;
3744 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3745 do {
3746 int_status = readl(&ioc->chip->HostInterruptStatus);
3747 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
3748 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3749 "%s: successful count(%d), timeout(%d)\n",
3750 ioc->name, __func__, count, timeout));
3751 return 0;
3752 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3753 doorbell = readl(&ioc->chip->Doorbell);
3754 if ((doorbell & MPI2_IOC_STATE_MASK) ==
3755 MPI2_IOC_STATE_FAULT) {
3756 mpt3sas_base_fault_info(ioc , doorbell);
3757 return -EFAULT;
3758 }
3759 } else if (int_status == 0xFFFFFFFF)
3760 goto out;
3761
3762 if (sleep_flag == CAN_SLEEP)
3763 usleep_range(1000, 1500);
3764 else
3765 udelay(500);
3766 count++;
3767 } while (--cntdn);
3768
3769 out:
3770 pr_err(MPT3SAS_FMT
3771 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3772 ioc->name, __func__, count, int_status);
3773 return -EFAULT;
3774}
3775
3776/**
3777 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3778 * @ioc: per adapter object
3779 * @timeout: timeout in seconds
3780 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3781 *
3782 * Returns 0 for success, non-zero for failure.
3783 *
3784 */
3785static int
3786_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
3787 int sleep_flag)
3788{
3789 u32 cntdn, count;
3790 u32 doorbell_reg;
3791
3792 count = 0;
3793 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3794 do {
3795 doorbell_reg = readl(&ioc->chip->Doorbell);
3796 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3797 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3798 "%s: successful count(%d), timeout(%d)\n",
3799 ioc->name, __func__, count, timeout));
3800 return 0;
3801 }
3802 if (sleep_flag == CAN_SLEEP)
3803 usleep_range(1000, 1500);
3804 else
3805 udelay(500);
3806 count++;
3807 } while (--cntdn);
3808
3809 pr_err(MPT3SAS_FMT
3810 "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
3811 ioc->name, __func__, count, doorbell_reg);
3812 return -EFAULT;
3813}
3814
3815/**
3816 * _base_send_ioc_reset - send doorbell reset
3817 * @ioc: per adapter object
3818 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3819 * @timeout: timeout in seconds
3820 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3821 *
3822 * Returns 0 for success, non-zero for failure.
3823 */
3824static int
3825_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
3826 int sleep_flag)
3827{
3828 u32 ioc_state;
3829 int r = 0;
3830
3831 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
3832 pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
3833 ioc->name, __func__);
3834 return -EFAULT;
3835 }
3836
3837 if (!(ioc->facts.IOCCapabilities &
3838 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3839 return -EFAULT;
3840
3841 pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
3842
3843 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3844 &ioc->chip->Doorbell);
3845 if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
3846 r = -EFAULT;
3847 goto out;
3848 }
3849 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3850 timeout, sleep_flag);
3851 if (ioc_state) {
3852 pr_err(MPT3SAS_FMT
3853 "%s: failed going to ready state (ioc_state=0x%x)\n",
3854 ioc->name, __func__, ioc_state);
3855 r = -EFAULT;
3856 goto out;
3857 }
3858 out:
3859 pr_info(MPT3SAS_FMT "message unit reset: %s\n",
3860 ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3861 return r;
3862}
3863
3864/**
3865 * _base_handshake_req_reply_wait - send request thru doorbell interface
3866 * @ioc: per adapter object
3867 * @request_bytes: request length
3868 * @request: pointer having request payload
3869 * @reply_bytes: reply length
3870 * @reply: pointer to reply payload
3871 * @timeout: timeout in seconds
3872 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3873 *
3874 * Returns 0 for success, non-zero for failure.
3875 */
3876static int
3877_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3878 u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3879{
3880 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3881 int i;
3882 u8 failed;
3883 u16 dummy;
3884 __le32 *mfp;
3885
3886 /* make sure doorbell is not in use */
3887 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3888 pr_err(MPT3SAS_FMT
3889 "doorbell is in use (line=%d)\n",
3890 ioc->name, __LINE__);
3891 return -EFAULT;
3892 }
3893
3894 /* clear pending doorbell interrupts from previous state changes */
3895 if (readl(&ioc->chip->HostInterruptStatus) &
3896 MPI2_HIS_IOC2SYS_DB_STATUS)
3897 writel(0, &ioc->chip->HostInterruptStatus);
3898
3899 /* send message to ioc */
3900 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3901 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3902 &ioc->chip->Doorbell);
3903
3904 if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3905 pr_err(MPT3SAS_FMT
3906 "doorbell handshake int failed (line=%d)\n",
3907 ioc->name, __LINE__);
3908 return -EFAULT;
3909 }
3910 writel(0, &ioc->chip->HostInterruptStatus);
3911
3912 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3913 pr_err(MPT3SAS_FMT
3914 "doorbell handshake ack failed (line=%d)\n",
3915 ioc->name, __LINE__);
3916 return -EFAULT;
3917 }
3918
3919 /* send message 32-bits at a time */
3920 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3921 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3922 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3923 failed = 1;
3924 }
3925
3926 if (failed) {
3927 pr_err(MPT3SAS_FMT
3928 "doorbell handshake sending request failed (line=%d)\n",
3929 ioc->name, __LINE__);
3930 return -EFAULT;
3931 }
3932
3933 /* now wait for the reply */
3934 if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3935 pr_err(MPT3SAS_FMT
3936 "doorbell handshake int failed (line=%d)\n",
3937 ioc->name, __LINE__);
3938 return -EFAULT;
3939 }
3940
3941 	/* read the first two 16-bit words; MsgLength in the reply header gives the total reply length */
3942 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3943 & MPI2_DOORBELL_DATA_MASK);
3944 writel(0, &ioc->chip->HostInterruptStatus);
3945 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3946 pr_err(MPT3SAS_FMT
3947 "doorbell handshake int failed (line=%d)\n",
3948 ioc->name, __LINE__);
3949 return -EFAULT;
3950 }
3951 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3952 & MPI2_DOORBELL_DATA_MASK);
3953 writel(0, &ioc->chip->HostInterruptStatus);
3954
3955 for (i = 2; i < default_reply->MsgLength * 2; i++) {
3956 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3957 pr_err(MPT3SAS_FMT
3958 "doorbell handshake int failed (line=%d)\n",
3959 ioc->name, __LINE__);
3960 return -EFAULT;
3961 }
3962 if (i >= reply_bytes/2) /* overflow case */
3963 dummy = readl(&ioc->chip->Doorbell);
3964 else
3965 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3966 & MPI2_DOORBELL_DATA_MASK);
3967 writel(0, &ioc->chip->HostInterruptStatus);
3968 }
3969
3970 _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3971 if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3972 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3973 "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
3974 }
3975 writel(0, &ioc->chip->HostInterruptStatus);
3976
3977 if (ioc->logging_level & MPT_DEBUG_INIT) {
3978 mfp = (__le32 *)reply;
3979 pr_info("\toffset:data\n");
3980 for (i = 0; i < reply_bytes/4; i++)
3981 pr_info("\t[0x%02x]:%08x\n", i*4,
3982 le32_to_cpu(mfp[i]));
3983 }
3984 return 0;
3985}
3986
3987/**
3988 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
3989 * @ioc: per adapter object
3990 * @mpi_reply: the reply payload from FW
3991 * @mpi_request: the request payload sent to FW
3992 *
3993 * The SAS IO Unit Control Request message allows the host to perform low-level
3994 * operations such as resets on the PHYs of the IO Unit. It also allows the host
3995 * to obtain the IOC-assigned device handle for a device, given other
3996 * identifying information about the device, and to remove IOC resources
3997 * associated with the device.
3998 *
3999 * Returns 0 for success, non-zero for failure.
4000 */
4001int
4002mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
4003 Mpi2SasIoUnitControlReply_t *mpi_reply,
4004 Mpi2SasIoUnitControlRequest_t *mpi_request)
4005{
4006 u16 smid;
4007 u32 ioc_state;
4008 unsigned long timeleft;
Dan Carpentereb445522014-12-04 13:57:05 +03004009 bool issue_reset = false;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304010 int rc;
4011 void *request;
4012 u16 wait_state_count;
4013
4014 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4015 __func__));
4016
4017 mutex_lock(&ioc->base_cmds.mutex);
4018
4019 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
4020 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
4021 ioc->name, __func__);
4022 rc = -EAGAIN;
4023 goto out;
4024 }
4025
4026 wait_state_count = 0;
4027 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4028 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4029 if (wait_state_count++ == 10) {
4030 pr_err(MPT3SAS_FMT
4031 "%s: failed due to ioc not operational\n",
4032 ioc->name, __func__);
4033 rc = -EFAULT;
4034 goto out;
4035 }
4036 ssleep(1);
4037 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4038 pr_info(MPT3SAS_FMT
4039 "%s: waiting for operational state(count=%d)\n",
4040 ioc->name, __func__, wait_state_count);
4041 }
4042
4043 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4044 if (!smid) {
4045 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4046 ioc->name, __func__);
4047 rc = -EAGAIN;
4048 goto out;
4049 }
4050
4051 rc = 0;
4052 ioc->base_cmds.status = MPT3_CMD_PENDING;
4053 request = mpt3sas_base_get_msg_frame(ioc, smid);
4054 ioc->base_cmds.smid = smid;
4055 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
4056 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
4057 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
4058 ioc->ioc_link_reset_in_progress = 1;
4059 init_completion(&ioc->base_cmds.done);
4060 mpt3sas_base_put_smid_default(ioc, smid);
4061 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
4062 msecs_to_jiffies(10000));
4063 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
4064 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
4065 ioc->ioc_link_reset_in_progress)
4066 ioc->ioc_link_reset_in_progress = 0;
4067 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4068 pr_err(MPT3SAS_FMT "%s: timeout\n",
4069 ioc->name, __func__);
4070 _debug_dump_mf(mpi_request,
4071 sizeof(Mpi2SasIoUnitControlRequest_t)/4);
4072 if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
Dan Carpentereb445522014-12-04 13:57:05 +03004073 issue_reset = true;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304074 goto issue_host_reset;
4075 }
4076 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
4077 memcpy(mpi_reply, ioc->base_cmds.reply,
4078 sizeof(Mpi2SasIoUnitControlReply_t));
4079 else
4080 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
4081 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4082 goto out;
4083
4084 issue_host_reset:
4085 if (issue_reset)
4086 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
4087 FORCE_BIG_HAMMER);
4088 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4089 rc = -EFAULT;
4090 out:
4091 mutex_unlock(&ioc->base_cmds.mutex);
4092 return rc;
4093}
4094
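/*
 * Editor's note: an illustrative caller sketch (not part of the driver)
 * showing how a user such as the transport layer might use the routine
 * above to issue a PHY hard reset.  The phy_number argument is an assumed
 * example value.
 */
static int example_phy_hard_reset(struct MPT3SAS_ADAPTER *ioc, u8 phy_number)
{
	Mpi2SasIoUnitControlRequest_t mpi_request;
	Mpi2SasIoUnitControlReply_t mpi_reply;

	memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
	mpi_request.PhyNum = phy_number;

	return mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request);
}
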
4095/**
4096 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
4097 * @ioc: per adapter object
4098 * @mpi_reply: the reply payload from FW
4099 * @mpi_request: the request payload sent to FW
4100 *
4101 * The SCSI Enclosure Processor request message causes the IOC to
4102 * communicate with SES devices to control LED status signals.
4103 *
4104 * Returns 0 for success, non-zero for failure.
4105 */
4106int
4107mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4108 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
4109{
4110 u16 smid;
4111 u32 ioc_state;
4112 unsigned long timeleft;
Dan Carpentereb445522014-12-04 13:57:05 +03004113 bool issue_reset = false;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304114 int rc;
4115 void *request;
4116 u16 wait_state_count;
4117
4118 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4119 __func__));
4120
4121 mutex_lock(&ioc->base_cmds.mutex);
4122
4123 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
4124 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
4125 ioc->name, __func__);
4126 rc = -EAGAIN;
4127 goto out;
4128 }
4129
4130 wait_state_count = 0;
4131 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4132 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4133 if (wait_state_count++ == 10) {
4134 pr_err(MPT3SAS_FMT
4135 "%s: failed due to ioc not operational\n",
4136 ioc->name, __func__);
4137 rc = -EFAULT;
4138 goto out;
4139 }
4140 ssleep(1);
4141 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4142 pr_info(MPT3SAS_FMT
4143 "%s: waiting for operational state(count=%d)\n",
4144 ioc->name,
4145 __func__, wait_state_count);
4146 }
4147
4148 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4149 if (!smid) {
4150 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4151 ioc->name, __func__);
4152 rc = -EAGAIN;
4153 goto out;
4154 }
4155
4156 rc = 0;
4157 ioc->base_cmds.status = MPT3_CMD_PENDING;
4158 request = mpt3sas_base_get_msg_frame(ioc, smid);
4159 ioc->base_cmds.smid = smid;
4160 	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
4161 init_completion(&ioc->base_cmds.done);
4162 mpt3sas_base_put_smid_default(ioc, smid);
4163 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
4164 msecs_to_jiffies(10000));
4165 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4166 pr_err(MPT3SAS_FMT "%s: timeout\n",
4167 ioc->name, __func__);
4168 _debug_dump_mf(mpi_request,
4169 sizeof(Mpi2SepRequest_t)/4);
4170 if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
Dan Carpentereb445522014-12-04 13:57:05 +03004171 			issue_reset = true;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304172 goto issue_host_reset;
4173 }
4174 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
4175 memcpy(mpi_reply, ioc->base_cmds.reply,
4176 sizeof(Mpi2SepReply_t));
4177 else
4178 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
4179 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4180 goto out;
4181
4182 issue_host_reset:
4183 if (issue_reset)
4184 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
4185 FORCE_BIG_HAMMER);
4186 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4187 rc = -EFAULT;
4188 out:
4189 mutex_unlock(&ioc->base_cmds.mutex);
4190 return rc;
4191}
4192
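/*
 * Editor's note: an illustrative caller sketch (not part of the driver)
 * showing how the routine above can be used to light the predicted-fault
 * LED of a slot addressed by its device handle; the handle is an assumed
 * example value.
 */
static int example_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SepRequest_t mpi_request;
	Mpi2SepReply_t mpi_reply;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus =
	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
	mpi_request.DevHandle = cpu_to_le16(handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;

	return mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request);
}
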
4193/**
4194 * _base_get_port_facts - obtain port facts reply and save in ioc
4195 * @ioc: per adapter object
4196 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4197 *
4198 * Returns 0 for success, non-zero for failure.
4199 */
4200static int
4201_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
4202{
4203 Mpi2PortFactsRequest_t mpi_request;
4204 Mpi2PortFactsReply_t mpi_reply;
4205 struct mpt3sas_port_facts *pfacts;
4206 int mpi_reply_sz, mpi_request_sz, r;
4207
4208 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4209 __func__));
4210
4211 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
4212 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
4213 memset(&mpi_request, 0, mpi_request_sz);
4214 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
4215 mpi_request.PortNumber = port;
4216 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4217 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
4218
4219 if (r != 0) {
4220 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4221 ioc->name, __func__, r);
4222 return r;
4223 }
4224
4225 pfacts = &ioc->pfacts[port];
4226 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
4227 pfacts->PortNumber = mpi_reply.PortNumber;
4228 pfacts->VP_ID = mpi_reply.VP_ID;
4229 pfacts->VF_ID = mpi_reply.VF_ID;
4230 pfacts->MaxPostedCmdBuffers =
4231 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
4232
4233 return 0;
4234}
4235
4236/**
Sreekanth Reddy4dc8c802015-06-30 12:24:48 +05304237 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
4238 * @ioc: per adapter object
4239 * @timeout: timeout in seconds
4240 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4241 *
4242 * Returns 0 for success, non-zero for failure.
4243 */
4244static int
4245_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
4246 int sleep_flag)
4247{
4248 u32 ioc_state;
4249 int rc;
4250
4251 dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
4252 __func__));
4253
4254 if (ioc->pci_error_recovery) {
4255 dfailprintk(ioc, printk(MPT3SAS_FMT
4256 "%s: host in pci error recovery\n", ioc->name, __func__));
4257 return -EFAULT;
4258 }
4259
4260 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4261 dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4262 ioc->name, __func__, ioc_state));
4263
4264 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
4265 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4266 return 0;
4267
4268 if (ioc_state & MPI2_DOORBELL_USED) {
4269 dhsprintk(ioc, printk(MPT3SAS_FMT
4270 "unexpected doorbell active!\n", ioc->name));
4271 goto issue_diag_reset;
4272 }
4273
4274 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4275 mpt3sas_base_fault_info(ioc, ioc_state &
4276 MPI2_DOORBELL_DATA_MASK);
4277 goto issue_diag_reset;
4278 }
4279
4280 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
4281 timeout, sleep_flag);
4282 if (ioc_state) {
4283 dfailprintk(ioc, printk(MPT3SAS_FMT
4284 "%s: failed going to ready state (ioc_state=0x%x)\n",
4285 ioc->name, __func__, ioc_state));
4286 return -EFAULT;
4287 }
4288
4289 issue_diag_reset:
4290 rc = _base_diag_reset(ioc, sleep_flag);
4291 return rc;
4292}
4293
4294/**
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304295 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
4296 * @ioc: per adapter object
4297 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4298 *
4299 * Returns 0 for success, non-zero for failure.
4300 */
4301static int
4302_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4303{
4304 Mpi2IOCFactsRequest_t mpi_request;
4305 Mpi2IOCFactsReply_t mpi_reply;
4306 struct mpt3sas_facts *facts;
4307 int mpi_reply_sz, mpi_request_sz, r;
4308
4309 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4310 __func__));
4311
Sreekanth Reddy4dc8c802015-06-30 12:24:48 +05304312 r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
4313 if (r) {
4314 dfailprintk(ioc, printk(MPT3SAS_FMT
4315 "%s: failed getting to correct state\n",
4316 ioc->name, __func__));
4317 return r;
4318 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304319 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
4320 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
4321 memset(&mpi_request, 0, mpi_request_sz);
4322 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
4323 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4324 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
4325
4326 if (r != 0) {
4327 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4328 ioc->name, __func__, r);
4329 return r;
4330 }
4331
4332 facts = &ioc->facts;
4333 memset(facts, 0, sizeof(struct mpt3sas_facts));
4334 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
4335 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
4336 facts->VP_ID = mpi_reply.VP_ID;
4337 facts->VF_ID = mpi_reply.VF_ID;
4338 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
4339 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
4340 facts->WhoInit = mpi_reply.WhoInit;
4341 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
4342 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
4343 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
4344 facts->MaxReplyDescriptorPostQueueDepth =
4345 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
4346 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
4347 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
4348 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
4349 ioc->ir_firmware = 1;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304350 if ((facts->IOCCapabilities &
4351 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
4352 ioc->rdpq_array_capable = 1;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304353 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
4354 facts->IOCRequestFrameSize =
4355 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
Suganath prabu Subramaniebb30242016-01-28 12:07:04 +05304356 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4357 facts->IOCMaxChainSegmentSize =
4358 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
4359 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304360 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
4361 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
4362 ioc->shost->max_id = -1;
4363 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
4364 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
4365 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
4366 facts->HighPriorityCredit =
4367 le16_to_cpu(mpi_reply.HighPriorityCredit);
4368 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
4369 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
4370
4371 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4372 "hba queue depth(%d), max chains per io(%d)\n",
4373 ioc->name, facts->RequestCredit,
4374 facts->MaxChainDepth));
4375 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4376 "request frame size(%d), reply frame size(%d)\n", ioc->name,
4377 facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
4378 return 0;
4379}
4380
4381/**
4382 * _base_send_ioc_init - send ioc_init to firmware
4383 * @ioc: per adapter object
4384 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4385 *
4386 * Returns 0 for success, non-zero for failure.
4387 */
4388static int
4389_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4390{
4391 Mpi2IOCInitRequest_t mpi_request;
4392 Mpi2IOCInitReply_t mpi_reply;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304393 int i, r = 0;
Tina Ruchandani23409bd2016-04-13 00:01:40 -07004394 ktime_t current_time;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304395 u16 ioc_status;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304396 u32 reply_post_free_array_sz = 0;
4397 Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
4398 dma_addr_t reply_post_free_array_dma;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304399
4400 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4401 __func__));
4402
4403 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
4404 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
4405 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
4406 mpi_request.VF_ID = 0; /* TODO */
4407 mpi_request.VP_ID = 0;
Sreekanth Reddyd357e842015-11-11 17:30:22 +05304408 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304409 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
4410
4411 if (_base_is_controller_msix_enabled(ioc))
4412 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
4413 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
4414 mpi_request.ReplyDescriptorPostQueueDepth =
4415 cpu_to_le16(ioc->reply_post_queue_depth);
4416 mpi_request.ReplyFreeQueueDepth =
4417 cpu_to_le16(ioc->reply_free_queue_depth);
4418
4419 mpi_request.SenseBufferAddressHigh =
4420 cpu_to_le32((u64)ioc->sense_dma >> 32);
4421 mpi_request.SystemReplyAddressHigh =
4422 cpu_to_le32((u64)ioc->reply_dma >> 32);
4423 mpi_request.SystemRequestFrameBaseAddress =
4424 cpu_to_le64((u64)ioc->request_dma);
4425 mpi_request.ReplyFreeQueueAddress =
4426 cpu_to_le64((u64)ioc->reply_free_dma);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304427
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304428 if (ioc->rdpq_array_enable) {
4429 reply_post_free_array_sz = ioc->reply_queue_count *
4430 sizeof(Mpi2IOCInitRDPQArrayEntry);
4431 reply_post_free_array = pci_alloc_consistent(ioc->pdev,
4432 reply_post_free_array_sz, &reply_post_free_array_dma);
4433 if (!reply_post_free_array) {
4434 pr_err(MPT3SAS_FMT
4435 "reply_post_free_array: pci_alloc_consistent failed\n",
4436 ioc->name);
4437 r = -ENOMEM;
4438 goto out;
4439 }
4440 memset(reply_post_free_array, 0, reply_post_free_array_sz);
4441 for (i = 0; i < ioc->reply_queue_count; i++)
4442 reply_post_free_array[i].RDPQBaseAddress =
4443 cpu_to_le64(
4444 (u64)ioc->reply_post[i].reply_post_free_dma);
4445 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
4446 mpi_request.ReplyDescriptorPostQueueAddress =
4447 cpu_to_le64((u64)reply_post_free_array_dma);
4448 } else {
4449 mpi_request.ReplyDescriptorPostQueueAddress =
4450 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
4451 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304452
4453 	/* This time stamp specifies the number of milliseconds
4454 * since epoch ~ midnight January 1, 1970.
4455 */
Tina Ruchandani23409bd2016-04-13 00:01:40 -07004456 current_time = ktime_get_real();
4457 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304458
4459 if (ioc->logging_level & MPT_DEBUG_INIT) {
4460 __le32 *mfp;
4461 int i;
4462
4463 mfp = (__le32 *)&mpi_request;
4464 pr_info("\toffset:data\n");
4465 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
4466 pr_info("\t[0x%02x]:%08x\n", i*4,
4467 le32_to_cpu(mfp[i]));
4468 }
4469
4470 r = _base_handshake_req_reply_wait(ioc,
4471 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
4472 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
4473 sleep_flag);
4474
4475 if (r != 0) {
4476 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4477 ioc->name, __func__, r);
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304478 goto out;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304479 }
4480
4481 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
4482 if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
4483 mpi_reply.IOCLogInfo) {
4484 pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
4485 r = -EIO;
4486 }
4487
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05304488out:
4489 if (reply_post_free_array)
4490 pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
4491 reply_post_free_array,
4492 reply_post_free_array_dma);
4493 return r;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304494}
4495
4496/**
4497 * mpt3sas_port_enable_done - command completion routine for port enable
4498 * @ioc: per adapter object
4499 * @smid: system request message index
4500 * @msix_index: MSIX table index supplied by the OS
4501 * @reply: reply message frame(lower 32bit addr)
4502 *
4503 * Return 1 meaning mf should be freed from _base_interrupt
4504 * 0 means the mf is freed from this function.
4505 */
4506u8
4507mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4508 u32 reply)
4509{
4510 MPI2DefaultReply_t *mpi_reply;
4511 u16 ioc_status;
4512
4513 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
4514 return 1;
4515
4516 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
4517 if (!mpi_reply)
4518 return 1;
4519
4520 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
4521 return 1;
4522
4523 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
4524 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
4525 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
4526 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
4527 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4528 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
4529 ioc->port_enable_failed = 1;
4530
4531 if (ioc->is_driver_loading) {
4532 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4533 mpt3sas_port_enable_complete(ioc);
4534 return 1;
4535 } else {
4536 ioc->start_scan_failed = ioc_status;
4537 ioc->start_scan = 0;
4538 return 1;
4539 }
4540 }
4541 complete(&ioc->port_enable_cmds.done);
4542 return 1;
4543}
4544
4545/**
4546 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
4547 * @ioc: per adapter object
4548 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4549 *
4550 * Returns 0 for success, non-zero for failure.
4551 */
4552static int
4553_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4554{
4555 Mpi2PortEnableRequest_t *mpi_request;
4556 Mpi2PortEnableReply_t *mpi_reply;
4557 unsigned long timeleft;
4558 int r = 0;
4559 u16 smid;
4560 u16 ioc_status;
4561
4562 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4563
4564 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4565 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4566 ioc->name, __func__);
4567 return -EAGAIN;
4568 }
4569
4570 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4571 if (!smid) {
4572 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4573 ioc->name, __func__);
4574 return -EAGAIN;
4575 }
4576
4577 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4578 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4579 ioc->port_enable_cmds.smid = smid;
4580 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4581 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4582
4583 init_completion(&ioc->port_enable_cmds.done);
4584 mpt3sas_base_put_smid_default(ioc, smid);
4585 timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
4586 300*HZ);
4587 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
4588 pr_err(MPT3SAS_FMT "%s: timeout\n",
4589 ioc->name, __func__);
4590 _debug_dump_mf(mpi_request,
4591 sizeof(Mpi2PortEnableRequest_t)/4);
4592 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
4593 r = -EFAULT;
4594 else
4595 r = -ETIME;
4596 goto out;
4597 }
4598
4599 mpi_reply = ioc->port_enable_cmds.reply;
4600 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4601 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4602 pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
4603 ioc->name, __func__, ioc_status);
4604 r = -EFAULT;
4605 goto out;
4606 }
4607
4608 out:
4609 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
4610 pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
4611 "SUCCESS" : "FAILED"));
4612 return r;
4613}
4614
4615/**
4616 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
4617 * @ioc: per adapter object
4618 *
4619 * Returns 0 for success, non-zero for failure.
4620 */
4621int
4622mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
4623{
4624 Mpi2PortEnableRequest_t *mpi_request;
4625 u16 smid;
4626
4627 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4628
4629 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4630 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4631 ioc->name, __func__);
4632 return -EAGAIN;
4633 }
4634
4635 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4636 if (!smid) {
4637 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4638 ioc->name, __func__);
4639 return -EAGAIN;
4640 }
4641
4642 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4643 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4644 ioc->port_enable_cmds.smid = smid;
4645 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4646 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4647
4648 mpt3sas_base_put_smid_default(ioc, smid);
4649 return 0;
4650}
4651
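/*
 * Editor's note: illustrative sketch (not part of the driver) of the
 * asynchronous discovery kick-off pattern the routine above is meant for:
 * the SCSI host scan path fires the request and later polls the flags that
 * mpt3sas_port_enable_done() above updates on completion.  The exact flag
 * handling here is an assumption based on that completion callback.
 */
static void example_async_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	ioc->start_scan = 1;
	if (mpt3sas_port_enable(ioc) != 0)
		ioc->start_scan = 0;	/* request was never queued */
	/* a scan_finished() style poller then watches ioc->start_scan */
}
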
4652/**
4653 * _base_determine_wait_on_discovery - disposition
4654 * @ioc: per adapter object
4655 *
4656 * Decide whether to wait on discovery to complete. Used to either
4657 * locate boot device, or report volumes ahead of physical devices.
4658 *
4659 * Returns 1 for wait, 0 for don't wait
4660 */
4661static int
4662_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
4663{
4664 /* We wait for discovery to complete if IR firmware is loaded.
4665 * The sas topology events arrive before PD events, so we need time to
4666 	 * turn on the bit in ioc->pd_handles to indicate PD.
4667 	 * Also, it may be required to report Volumes ahead of physical
4668 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
4669 */
4670 if (ioc->ir_firmware)
4671 return 1;
4672
4673 /* if no Bios, then we don't need to wait */
4674 if (!ioc->bios_pg3.BiosVersion)
4675 return 0;
4676
4677 	/* If the Bios is present, we drop down here.
4678 	 *
4679 	 * If there are any entries in the Bios Page 2, then we wait
4680 * for discovery to complete.
4681 */
4682
4683 /* Current Boot Device */
4684 if ((ioc->bios_pg2.CurrentBootDeviceForm &
4685 MPI2_BIOSPAGE2_FORM_MASK) ==
4686 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4687 /* Request Boot Device */
4688 (ioc->bios_pg2.ReqBootDeviceForm &
4689 MPI2_BIOSPAGE2_FORM_MASK) ==
4690 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4691 /* Alternate Request Boot Device */
4692 (ioc->bios_pg2.ReqAltBootDeviceForm &
4693 MPI2_BIOSPAGE2_FORM_MASK) ==
4694 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
4695 return 0;
4696
4697 return 1;
4698}
4699
4700/**
4701 * _base_unmask_events - turn on notification for this event
4702 * @ioc: per adapter object
4703 * @event: firmware event
4704 *
4705 * The mask is stored in ioc->event_masks.
4706 */
4707static void
4708_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
4709{
4710 u32 desired_event;
4711
4712 if (event >= 128)
4713 return;
4714
4715 desired_event = (1 << (event % 32));
4716
4717 if (event < 32)
4718 ioc->event_masks[0] &= ~desired_event;
4719 else if (event < 64)
4720 ioc->event_masks[1] &= ~desired_event;
4721 else if (event < 96)
4722 ioc->event_masks[2] &= ~desired_event;
4723 else if (event < 128)
4724 ioc->event_masks[3] &= ~desired_event;
4725}
4726
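/*
 * Editor's note: illustrative sketch (not part of the driver).  The event
 * mask is four 32-bit words covering events 0..127, so event N lives in
 * word N/32, bit N%32; e.g. MPI2_EVENT_SAS_DISCOVERY (0x0016 = 22) clears
 * bit 22 of word 0.  Individual events are unmasked this way before the
 * event notification request is sent to the firmware.
 */
static void example_unmask_discovery_event(struct MPT3SAS_ADAPTER *ioc)
{
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
}
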
4727/**
4728 * _base_event_notification - send event notification
4729 * @ioc: per adapter object
4730 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4731 *
4732 * Returns 0 for success, non-zero for failure.
4733 */
4734static int
4735_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4736{
4737 Mpi2EventNotificationRequest_t *mpi_request;
4738 unsigned long timeleft;
4739 u16 smid;
4740 int r = 0;
4741 int i;
4742
4743 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4744 __func__));
4745
4746 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4747 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4748 ioc->name, __func__);
4749 return -EAGAIN;
4750 }
4751
4752 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4753 if (!smid) {
4754 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4755 ioc->name, __func__);
4756 return -EAGAIN;
4757 }
4758 ioc->base_cmds.status = MPT3_CMD_PENDING;
4759 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4760 ioc->base_cmds.smid = smid;
4761 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
4762 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
4763 mpi_request->VF_ID = 0; /* TODO */
4764 mpi_request->VP_ID = 0;
4765 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4766 mpi_request->EventMasks[i] =
4767 cpu_to_le32(ioc->event_masks[i]);
4768 init_completion(&ioc->base_cmds.done);
4769 mpt3sas_base_put_smid_default(ioc, smid);
4770 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
4771 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4772 pr_err(MPT3SAS_FMT "%s: timeout\n",
4773 ioc->name, __func__);
4774 _debug_dump_mf(mpi_request,
4775 sizeof(Mpi2EventNotificationRequest_t)/4);
4776 if (ioc->base_cmds.status & MPT3_CMD_RESET)
4777 r = -EFAULT;
4778 else
4779 r = -ETIME;
4780 } else
4781 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
4782 ioc->name, __func__));
4783 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4784 return r;
4785}
4786
4787/**
4788 * mpt3sas_base_validate_event_type - validating event types
4789 * @ioc: per adapter object
4790 * @event: firmware event
4791 *
4792 * This will turn on firmware event notification when the application
4793 * asks for that event. We don't mask events that are already enabled.
4794 */
4795void
4796mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
4797{
4798 int i, j;
4799 u32 event_mask, desired_event;
4800 u8 send_update_to_fw;
4801
4802 for (i = 0, send_update_to_fw = 0; i <
4803 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
4804 event_mask = ~event_type[i];
4805 desired_event = 1;
4806 for (j = 0; j < 32; j++) {
4807 if (!(event_mask & desired_event) &&
4808 (ioc->event_masks[i] & desired_event)) {
4809 ioc->event_masks[i] &= ~desired_event;
4810 send_update_to_fw = 1;
4811 }
4812 desired_event = (desired_event << 1);
4813 }
4814 }
4815
4816 if (!send_update_to_fw)
4817 return;
4818
4819 mutex_lock(&ioc->base_cmds.mutex);
4820 _base_event_notification(ioc, CAN_SLEEP);
4821 mutex_unlock(&ioc->base_cmds.mutex);
4822}
4823
4824/**
4825 * _base_diag_reset - the "big hammer" start of day reset
4826 * @ioc: per adapter object
4827 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4828 *
4829 * Returns 0 for success, non-zero for failure.
4830 */
4831static int
4832_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4833{
4834 u32 host_diagnostic;
4835 u32 ioc_state;
4836 u32 count;
4837 u32 hcb_size;
4838
4839 pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
4840
4841 drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
4842 ioc->name));
4843
4844 count = 0;
4845 do {
4846 /* Write magic sequence to WriteSequence register
4847 * Loop until in diagnostic mode
4848 */
4849 drsprintk(ioc, pr_info(MPT3SAS_FMT
4850 "write magic sequence\n", ioc->name));
4851 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4852 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
4853 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
4854 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
4855 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
4856 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
4857 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4858
4859 /* wait 100 msec */
4860 if (sleep_flag == CAN_SLEEP)
4861 msleep(100);
4862 else
4863 mdelay(100);
4864
4865 if (count++ > 20)
4866 goto out;
4867
4868 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4869 drsprintk(ioc, pr_info(MPT3SAS_FMT
4870 "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
4871 ioc->name, count, host_diagnostic));
4872
4873 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
4874
4875 hcb_size = readl(&ioc->chip->HCBSize);
4876
4877 drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
4878 ioc->name));
4879 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
4880 &ioc->chip->HostDiagnostic);
4881
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304882 /*This delay allows the chip PCIe hardware time to finish reset tasks*/
4883 if (sleep_flag == CAN_SLEEP)
4884 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4885 else
4886 mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304887
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304888 /* Approximately 300 second max wait */
4889 for (count = 0; count < (300000000 /
4890 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304891
4892 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4893
4894 if (host_diagnostic == 0xFFFFFFFF)
4895 goto out;
4896 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4897 break;
4898
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304899 /* Wait to pass the second read delay window */
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304900 if (sleep_flag == CAN_SLEEP)
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304901 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4902 / 1000);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304903 else
Sreekanth Reddyb453ff82013-06-29 03:51:19 +05304904 mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4905 / 1000);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05304906 }
4907
4908 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
4909
4910 drsprintk(ioc, pr_info(MPT3SAS_FMT
4911 "restart the adapter assuming the HCB Address points to good F/W\n",
4912 ioc->name));
4913 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
4914 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
4915 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
4916
4917 drsprintk(ioc, pr_info(MPT3SAS_FMT
4918 "re-enable the HCDW\n", ioc->name));
4919 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
4920 &ioc->chip->HCBSize);
4921 }
4922
4923 drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
4924 ioc->name));
4925 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
4926 &ioc->chip->HostDiagnostic);
4927
4928 drsprintk(ioc, pr_info(MPT3SAS_FMT
4929 "disable writes to the diagnostic register\n", ioc->name));
4930 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4931
4932 drsprintk(ioc, pr_info(MPT3SAS_FMT
4933 "Wait for FW to go to the READY state\n", ioc->name));
4934 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
4935 sleep_flag);
4936 if (ioc_state) {
4937 pr_err(MPT3SAS_FMT
4938 "%s: failed going to ready state (ioc_state=0x%x)\n",
4939 ioc->name, __func__, ioc_state);
4940 goto out;
4941 }
4942
4943 pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
4944 return 0;
4945
4946 out:
4947 pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
4948 return -EFAULT;
4949}
4950
4951/**
4952 * _base_make_ioc_ready - put controller in READY state
4953 * @ioc: per adapter object
4954 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4955 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4956 *
4957 * Returns 0 for success, non-zero for failure.
4958 */
4959static int
4960_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4961 enum reset_type type)
4962{
4963 u32 ioc_state;
4964 int rc;
4965 int count;
4966
4967 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4968 __func__));
4969
4970 if (ioc->pci_error_recovery)
4971 return 0;
4972
4973 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4974 dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4975 ioc->name, __func__, ioc_state));
4976
4977 /* if in RESET state, it should move to READY state shortly */
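	/* poll for up to ~10 seconds (10 x 1 sec) */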
4978 count = 0;
4979 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
4980 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
4981 MPI2_IOC_STATE_READY) {
4982 if (count++ == 10) {
4983 pr_err(MPT3SAS_FMT
4984 "%s: failed going to ready state (ioc_state=0x%x)\n",
4985 ioc->name, __func__, ioc_state);
4986 return -EFAULT;
4987 }
4988 if (sleep_flag == CAN_SLEEP)
4989 ssleep(1);
4990 else
4991 mdelay(1000);
4992 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4993 }
4994 }
4995
4996 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4997 return 0;
4998
4999 if (ioc_state & MPI2_DOORBELL_USED) {
5000 dhsprintk(ioc, pr_info(MPT3SAS_FMT
5001 "unexpected doorbell active!\n",
5002 ioc->name));
5003 goto issue_diag_reset;
5004 }
5005
5006 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
5007 mpt3sas_base_fault_info(ioc, ioc_state &
5008 MPI2_DOORBELL_DATA_MASK);
5009 goto issue_diag_reset;
5010 }
5011
5012 if (type == FORCE_BIG_HAMMER)
5013 goto issue_diag_reset;
5014
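	/* try a message unit (soft) reset first; fall back to the
	 * diagnostic (hard) reset below if it fails
	 */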
5015 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
5016 if (!(_base_send_ioc_reset(ioc,
5017 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
5018 return 0;
5019 }
5020
5021 issue_diag_reset:
5022 rc = _base_diag_reset(ioc, CAN_SLEEP);
5023 return rc;
5024}
5025
5026/**
5027 * _base_make_ioc_operational - put controller in OPERATIONAL state
5028 * @ioc: per adapter object
5029 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5030 *
5031 * Returns 0 for success, non-zero for failure.
5032 */
5033static int
5034_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5035{
Calvin Owens5ec8a172016-03-18 12:45:42 -07005036 int r, i, index;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305037 unsigned long flags;
5038 u32 reply_address;
5039 u16 smid;
5040 struct _tr_list *delayed_tr, *delayed_tr_next;
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05305041 struct _sc_list *delayed_sc, *delayed_sc_next;
5042 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305043 u8 hide_flag;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305044 struct adapter_reply_queue *reply_q;
Calvin Owens5ec8a172016-03-18 12:45:42 -07005045 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305046
5047 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5048 __func__));
5049
5050 /* clean the delayed target reset list */
5051 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
5052 &ioc->delayed_tr_list, list) {
5053 list_del(&delayed_tr->list);
5054 kfree(delayed_tr);
5055 }
5056
5057
5058 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
5059 &ioc->delayed_tr_volume_list, list) {
5060 list_del(&delayed_tr->list);
5061 kfree(delayed_tr);
5062 }
5063
Suganath prabu Subramanifd0331b2016-01-28 12:07:02 +05305064 list_for_each_entry_safe(delayed_sc, delayed_sc_next,
5065 &ioc->delayed_sc_list, list) {
5066 list_del(&delayed_sc->list);
5067 kfree(delayed_sc);
5068 }
5069
5070 list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
5071 &ioc->delayed_event_ack_list, list) {
5072 list_del(&delayed_event_ack->list);
5073 kfree(delayed_event_ack);
5074 }
5075
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305076 /* initialize the scsi lookup free list */
5077 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5078 INIT_LIST_HEAD(&ioc->free_list);
5079 smid = 1;
5080 for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
5081 INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
5082 ioc->scsi_lookup[i].cb_idx = 0xFF;
5083 ioc->scsi_lookup[i].smid = smid;
5084 ioc->scsi_lookup[i].scmd = NULL;
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305085 ioc->scsi_lookup[i].direct_io = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305086 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
5087 &ioc->free_list);
5088 }
5089
5090 /* hi-priority queue */
5091 INIT_LIST_HEAD(&ioc->hpr_free_list);
5092 smid = ioc->hi_priority_smid;
5093 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
5094 ioc->hpr_lookup[i].cb_idx = 0xFF;
5095 ioc->hpr_lookup[i].smid = smid;
5096 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
5097 &ioc->hpr_free_list);
5098 }
5099
5100 /* internal queue */
5101 INIT_LIST_HEAD(&ioc->internal_free_list);
5102 smid = ioc->internal_smid;
5103 for (i = 0; i < ioc->internal_depth; i++, smid++) {
5104 ioc->internal_lookup[i].cb_idx = 0xFF;
5105 ioc->internal_lookup[i].smid = smid;
5106 list_add_tail(&ioc->internal_lookup[i].tracker_list,
5107 &ioc->internal_free_list);
5108 }
5109
5110 /* chain pool */
5111 INIT_LIST_HEAD(&ioc->free_chain_list);
5112 for (i = 0; i < ioc->chain_depth; i++)
5113 list_add_tail(&ioc->chain_lookup[i].tracker_list,
5114 &ioc->free_chain_list);
5115
5116 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5117
5118 /* initialize Reply Free Queue */
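	/* each entry holds the DMA address of one reply frame that the
	 * IOC may use to post a reply
	 */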
5119 for (i = 0, reply_address = (u32)ioc->reply_dma ;
5120 i < ioc->reply_free_queue_depth ; i++, reply_address +=
5121 ioc->reply_sz)
5122 ioc->reply_free[i] = cpu_to_le32(reply_address);
5123
5124 /* initialize reply queues */
5125 if (ioc->is_driver_loading)
5126 _base_assign_reply_queues(ioc);
5127
5128 /* initialize Reply Post Free Queue */
Calvin Owens5ec8a172016-03-18 12:45:42 -07005129 index = 0;
5130 reply_post_free_contig = ioc->reply_post[0].reply_post_free;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305131 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
Calvin Owens5ec8a172016-03-18 12:45:42 -07005132 /*
5133 * If RDPQ is enabled, switch to the next allocation.
5134 * Otherwise advance within the contiguous region.
5135 */
5136 if (ioc->rdpq_array_enable) {
5137 reply_q->reply_post_free =
5138 ioc->reply_post[index++].reply_post_free;
5139 } else {
5140 reply_q->reply_post_free = reply_post_free_contig;
5141 reply_post_free_contig += ioc->reply_post_queue_depth;
5142 }
5143
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305144 reply_q->reply_post_host_index = 0;
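		/* an all-ones descriptor is treated as empty/unused by the
		 * host interrupt handler
		 */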
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305145 for (i = 0; i < ioc->reply_post_queue_depth; i++)
5146 reply_q->reply_post_free[i].Words =
5147 cpu_to_le64(ULLONG_MAX);
5148 if (!_base_is_controller_msix_enabled(ioc))
5149 goto skip_init_reply_post_free_queue;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305150 }
5151 skip_init_reply_post_free_queue:
5152
5153 r = _base_send_ioc_init(ioc, sleep_flag);
5154 if (r)
5155 return r;
5156
5157 /* initialize reply free host index */
5158 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
5159 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
5160
5161 /* initialize reply post host index */
5162 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
Sreekanth Reddyfb77bb52015-06-30 12:24:47 +05305163 if (ioc->msix96_vector)
5164 writel((reply_q->msix_index & 7)<<
5165 MPI2_RPHI_MSIX_INDEX_SHIFT,
5166 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
5167 else
5168 writel(reply_q->msix_index <<
5169 MPI2_RPHI_MSIX_INDEX_SHIFT,
5170 &ioc->chip->ReplyPostHostIndex);
5171
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305172 if (!_base_is_controller_msix_enabled(ioc))
5173 goto skip_init_reply_post_host_index;
5174 }
5175
5176 skip_init_reply_post_host_index:
5177
5178 _base_unmask_interrupts(ioc);
5179 r = _base_event_notification(ioc, sleep_flag);
5180 if (r)
5181 return r;
5182
5183 if (sleep_flag == CAN_SLEEP)
5184 _base_static_config_pages(ioc);
5185
5186
5187 if (ioc->is_driver_loading) {
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305188
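		/* Warpdrive only: pick up the SSD hiding policy from
		 * Manufacturing Page 10 when the OEM identifier matches.
		 */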
5189 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
5190 == 0x80) {
5191 hide_flag = (u8) (
5192 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
5193 MFG_PAGE10_HIDE_SSDS_MASK);
5194 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
5195 ioc->mfg_pg10_hide_flag = hide_flag;
5196 }
5197
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305198 ioc->wait_for_discovery_to_complete =
5199 _base_determine_wait_on_discovery(ioc);
5200
5201 return r; /* scan_start and scan_finished support */
5202 }
5203
5204 r = _base_send_port_enable(ioc, sleep_flag);
5205 if (r)
5206 return r;
5207
5208 return r;
5209}
5210
5211/**
5212 * mpt3sas_base_free_resources - free controller resources
5213 * @ioc: per adapter object
5214 *
5215 * Return nothing.
5216 */
5217void
5218mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
5219{
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305220 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5221 __func__));
5222
Sreekanth Reddy08c4d552015-11-11 17:30:33 +05305223	/* synchronize freeing of resources with the pci_access_mutex lock */
5224 mutex_lock(&ioc->pci_access_mutex);
Joe Lawrencecf9bd21a2013-08-08 16:45:39 -04005225 if (ioc->chip_phys && ioc->chip) {
5226 _base_mask_interrupts(ioc);
5227 ioc->shost_recovery = 1;
5228 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
5229 ioc->shost_recovery = 0;
5230 }
5231
Sreekanth Reddy580d4e32015-06-30 12:24:50 +05305232 mpt3sas_base_unmap_resources(ioc);
Sreekanth Reddy08c4d552015-11-11 17:30:33 +05305233 mutex_unlock(&ioc->pci_access_mutex);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305234 return;
5235}
5236
5237/**
5238 * mpt3sas_base_attach - attach controller instance
5239 * @ioc: per adapter object
5240 *
5241 * Returns 0 for success, non-zero for failure.
5242 */
5243int
5244mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5245{
5246 int r, i;
5247 int cpu_id, last_cpu_id = 0;
5248
5249 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5250 __func__));
5251
5252 /* setup cpu_msix_table */
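	/* cpu_msix_table maps each online CPU to the reply queue
	 * (MSI-X index) it will use
	 */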
5253 ioc->cpu_count = num_online_cpus();
5254 for_each_online_cpu(cpu_id)
5255 last_cpu_id = cpu_id;
5256 ioc->cpu_msix_table_sz = last_cpu_id + 1;
5257 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
5258 ioc->reply_queue_count = 1;
5259 if (!ioc->cpu_msix_table) {
5260 dfailprintk(ioc, pr_info(MPT3SAS_FMT
5261 "allocation for cpu_msix_table failed!!!\n",
5262 ioc->name));
5263 r = -ENOMEM;
5264 goto out_free_resources;
5265 }
5266
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305267 if (ioc->is_warpdrive) {
5268 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
5269 sizeof(resource_size_t *), GFP_KERNEL);
5270 if (!ioc->reply_post_host_index) {
5271 dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
5272 "for cpu_msix_table failed!!!\n", ioc->name));
5273 r = -ENOMEM;
5274 goto out_free_resources;
5275 }
5276 }
5277
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05305278 ioc->rdpq_array_enable_assigned = 0;
5279 ioc->dma_mask = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305280 r = mpt3sas_base_map_resources(ioc);
5281 if (r)
5282 goto out_free_resources;
5283
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305284 if (ioc->is_warpdrive) {
5285 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
5286 &ioc->chip->ReplyPostHostIndex;
5287
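		/* the remaining MSI-X vectors use the additional
		 * ReplyPostHostIndex registers mapped at a fixed offset
		 * from the Doorbell register (Warpdrive register layout)
		 */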
5288 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
5289 ioc->reply_post_host_index[i] =
5290 (resource_size_t __iomem *)
5291 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
5292 * 4)));
5293 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305294
5295 pci_set_drvdata(ioc->pdev, ioc->shost);
5296 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
5297 if (r)
5298 goto out_free_resources;
5299
Sreekanth Reddy471ef9d2015-11-11 17:30:24 +05305300 switch (ioc->hba_mpi_version_belonged) {
5301 case MPI2_VERSION:
5302 ioc->build_sg_scmd = &_base_build_sg_scmd;
5303 ioc->build_sg = &_base_build_sg;
5304 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
5305 break;
5306 case MPI25_VERSION:
Suganath prabu Subramanib130b0d2016-01-28 12:06:58 +05305307 case MPI26_VERSION:
Sreekanth Reddy471ef9d2015-11-11 17:30:24 +05305308 /*
5309 * In SAS3.0 and above,
5310 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
5311 * Target Status - all require the IEEE formatted scatter gather
5312 * elements.
5313 */
5314 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
5315 ioc->build_sg = &_base_build_sg_ieee;
5316 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
5317 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
5318 break;
5319 }
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305320
5321 /*
5322 * These function pointers are for the other requests that don't
5323 * require IEEE scatter gather elements.
5324 *
5325 * For example Configuration Pages and SAS IOUNIT Control don't.
5326 */
5327 ioc->build_sg_mpi = &_base_build_sg;
5328 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
5329
5330 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
5331 if (r)
5332 goto out_free_resources;
5333
5334 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
5335 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
5336 if (!ioc->pfacts) {
5337 r = -ENOMEM;
5338 goto out_free_resources;
5339 }
5340
5341 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
5342 r = _base_get_port_facts(ioc, i, CAN_SLEEP);
5343 if (r)
5344 goto out_free_resources;
5345 }
5346
5347 r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
5348 if (r)
5349 goto out_free_resources;
5350
5351 init_waitqueue_head(&ioc->reset_wq);
5352
5353 /* allocate memory pd handle bitmask list */
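	/* one bit per possible device handle, rounded up to whole bytes */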
5354 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
5355 if (ioc->facts.MaxDevHandle % 8)
5356 ioc->pd_handles_sz++;
5357 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
5358 GFP_KERNEL);
5359 if (!ioc->pd_handles) {
5360 r = -ENOMEM;
5361 goto out_free_resources;
5362 }
5363 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
5364 GFP_KERNEL);
5365 if (!ioc->blocking_handles) {
5366 r = -ENOMEM;
5367 goto out_free_resources;
5368 }
5369
5370 ioc->fwfault_debug = mpt3sas_fwfault_debug;
5371
5372 /* base internal command bits */
5373 mutex_init(&ioc->base_cmds.mutex);
5374 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5375 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5376
5377 /* port_enable command bits */
5378 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5379 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5380
5381 /* transport internal command bits */
5382 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5383 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
5384 mutex_init(&ioc->transport_cmds.mutex);
5385
5386 /* scsih internal command bits */
5387 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5388 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
5389 mutex_init(&ioc->scsih_cmds.mutex);
5390
5391 /* task management internal command bits */
5392 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5393 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
5394 mutex_init(&ioc->tm_cmds.mutex);
5395
5396 /* config page internal command bits */
5397 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5398 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
5399 mutex_init(&ioc->config_cmds.mutex);
5400
5401 /* ctl module internal command bits */
5402 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5403 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
5404 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
5405 mutex_init(&ioc->ctl_cmds.mutex);
5406
5407 if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
5408 !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
5409 !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
5410 !ioc->ctl_cmds.sense) {
5411 r = -ENOMEM;
5412 goto out_free_resources;
5413 }
5414
5415 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5416 ioc->event_masks[i] = -1;
5417
5418 /* here we enable the events we care about */
5419 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
5420 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
5421 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
5422 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5423 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
5424 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
5425 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
5426 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
5427 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
5428 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
Sreekanth Reddy2d8ce8c2015-01-12 11:38:56 +05305429 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
Chaitra P Ba470a512016-05-06 14:29:27 +05305430 if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
5431 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305432
5433 r = _base_make_ioc_operational(ioc, CAN_SLEEP);
5434 if (r)
5435 goto out_free_resources;
5436
Sreekanth Reddy16e179b2015-11-11 17:30:27 +05305437 ioc->non_operational_loop = 0;
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305438 return 0;
5439
5440 out_free_resources:
5441
5442 ioc->remove_host = 1;
5443
5444 mpt3sas_base_free_resources(ioc);
5445 _base_release_memory_pools(ioc);
5446 pci_set_drvdata(ioc->pdev, NULL);
5447 kfree(ioc->cpu_msix_table);
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305448 if (ioc->is_warpdrive)
5449 kfree(ioc->reply_post_host_index);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305450 kfree(ioc->pd_handles);
5451 kfree(ioc->blocking_handles);
5452 kfree(ioc->tm_cmds.reply);
5453 kfree(ioc->transport_cmds.reply);
5454 kfree(ioc->scsih_cmds.reply);
5455 kfree(ioc->config_cmds.reply);
5456 kfree(ioc->base_cmds.reply);
5457 kfree(ioc->port_enable_cmds.reply);
5458 kfree(ioc->ctl_cmds.reply);
5459 kfree(ioc->ctl_cmds.sense);
5460 kfree(ioc->pfacts);
5461 ioc->ctl_cmds.reply = NULL;
5462 ioc->base_cmds.reply = NULL;
5463 ioc->tm_cmds.reply = NULL;
5464 ioc->scsih_cmds.reply = NULL;
5465 ioc->transport_cmds.reply = NULL;
5466 ioc->config_cmds.reply = NULL;
5467 ioc->pfacts = NULL;
5468 return r;
5469}
5470
5471
5472/**
5473 * mpt3sas_base_detach - remove controller instance
5474 * @ioc: per adapter object
5475 *
5476 * Return nothing.
5477 */
5478void
5479mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
5480{
5481 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5482 __func__));
5483
5484 mpt3sas_base_stop_watchdog(ioc);
5485 mpt3sas_base_free_resources(ioc);
5486 _base_release_memory_pools(ioc);
5487 pci_set_drvdata(ioc->pdev, NULL);
5488 kfree(ioc->cpu_msix_table);
Sreekanth Reddy7786ab62015-11-11 17:30:28 +05305489 if (ioc->is_warpdrive)
5490 kfree(ioc->reply_post_host_index);
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305491 kfree(ioc->pd_handles);
5492 kfree(ioc->blocking_handles);
5493 kfree(ioc->pfacts);
5494 kfree(ioc->ctl_cmds.reply);
5495 kfree(ioc->ctl_cmds.sense);
5496 kfree(ioc->base_cmds.reply);
5497 kfree(ioc->port_enable_cmds.reply);
5498 kfree(ioc->tm_cmds.reply);
5499 kfree(ioc->transport_cmds.reply);
5500 kfree(ioc->scsih_cmds.reply);
5501 kfree(ioc->config_cmds.reply);
5502}
5503
5504/**
5505 * _base_reset_handler - reset callback handler (for base)
5506 * @ioc: per adapter object
5507 * @reset_phase: phase
5508 *
5509 * The handler for doing any required cleanup or initialization.
5510 *
5511 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
5512 * MPT3_IOC_DONE_RESET
5513 *
5514 * Return nothing.
5515 */
5516static void
5517_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
5518{
5519 mpt3sas_scsih_reset_handler(ioc, reset_phase);
5520 mpt3sas_ctl_reset_handler(ioc, reset_phase);
5521 switch (reset_phase) {
5522 case MPT3_IOC_PRE_RESET:
5523 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5524 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
5525 break;
5526 case MPT3_IOC_AFTER_RESET:
5527 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5528 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
5529 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
5530 ioc->transport_cmds.status |= MPT3_CMD_RESET;
5531 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
5532 complete(&ioc->transport_cmds.done);
5533 }
5534 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
5535 ioc->base_cmds.status |= MPT3_CMD_RESET;
5536 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
5537 complete(&ioc->base_cmds.done);
5538 }
5539 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5540 ioc->port_enable_failed = 1;
5541 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
5542 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
5543 if (ioc->is_driver_loading) {
5544 ioc->start_scan_failed =
5545 MPI2_IOCSTATUS_INTERNAL_ERROR;
5546 ioc->start_scan = 0;
5547 ioc->port_enable_cmds.status =
5548 MPT3_CMD_NOT_USED;
5549 } else
5550 complete(&ioc->port_enable_cmds.done);
5551 }
5552 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
5553 ioc->config_cmds.status |= MPT3_CMD_RESET;
5554 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
5555 ioc->config_cmds.smid = USHRT_MAX;
5556 complete(&ioc->config_cmds.done);
5557 }
5558 break;
5559 case MPT3_IOC_DONE_RESET:
5560 dtmprintk(ioc, pr_info(MPT3SAS_FMT
5561 "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
5562 break;
5563 }
5564}
5565
5566/**
5567 * _wait_for_commands_to_complete - wait for pending commands to complete
5568 * @ioc: per adapter object
5569 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5570 *
5571 * This function waits (up to 10 seconds) for all pending commands to
5572 * complete prior to putting the controller into reset.
5573 */
5574static void
5575_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5576{
5577 u32 ioc_state;
5578 unsigned long flags;
5579 u16 i;
5580
5581 ioc->pending_io_count = 0;
5582 if (sleep_flag != CAN_SLEEP)
5583 return;
5584
5585 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5586 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
5587 return;
5588
5589 /* pending command count */
5590 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5591 for (i = 0; i < ioc->scsiio_depth; i++)
5592 if (ioc->scsi_lookup[i].cb_idx != 0xFF)
5593 ioc->pending_io_count++;
5594 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5595
5596 if (!ioc->pending_io_count)
5597 return;
5598
5599 /* wait for pending commands to complete */
5600 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
5601}
5602
5603/**
5604 * mpt3sas_base_hard_reset_handler - reset controller
5605 * @ioc: Pointer to MPT_ADAPTER structure
5606 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5607 * @type: FORCE_BIG_HAMMER or SOFT_RESET
5608 *
5609 * Returns 0 for success, non-zero for failure.
5610 */
5611int
5612mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5613 enum reset_type type)
5614{
5615 int r;
5616 unsigned long flags;
5617 u32 ioc_state;
5618 u8 is_fault = 0, is_trigger = 0;
5619
5620 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
5621 __func__));
5622
5623 if (ioc->pci_error_recovery) {
5624 pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
5625 ioc->name, __func__);
5626 r = 0;
5627 goto out_unlocked;
5628 }
5629
5630 if (mpt3sas_fwfault_debug)
5631 mpt3sas_halt_firmware(ioc);
5632
5633 /* TODO - What we really should be doing is pulling
5634 * out all the code associated with NO_SLEEP; it's never used.
5635 * That is legacy code from the mpt fusion driver, ported over.
5636 * I will leave this BUG_ON here for now until it's been resolved.
5637 */
5638 BUG_ON(sleep_flag == NO_SLEEP);
5639
5640 /* wait for an active reset in progress to complete */
5641 if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
5642 do {
5643 ssleep(1);
5644 } while (ioc->shost_recovery == 1);
5645 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5646 __func__));
5647 return ioc->ioc_reset_in_progress_status;
5648 }
5649
5650 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5651 ioc->shost_recovery = 1;
5652 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5653
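	/* If a trace diag buffer is registered and not yet released, note
	 * whether the IOC was faulted so the matching trigger can fire
	 * once the reset completes.
	 */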
5654 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5655 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
5656 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5657 MPT3_DIAG_BUFFER_IS_RELEASED))) {
5658 is_trigger = 1;
5659 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5660 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
5661 is_fault = 1;
5662 }
5663 _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
5664 _wait_for_commands_to_complete(ioc, sleep_flag);
5665 _base_mask_interrupts(ioc);
5666 r = _base_make_ioc_ready(ioc, sleep_flag, type);
5667 if (r)
5668 goto out;
5669 _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
5670
5671 /* If this hard reset is called while port enable is active, then
5672 * there is no reason to call make_ioc_operational
5673 */
5674 if (ioc->is_driver_loading && ioc->port_enable_failed) {
5675 ioc->remove_host = 1;
5676 r = -EFAULT;
5677 goto out;
5678 }
5679 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
5680 if (r)
5681 goto out;
Sreekanth Reddy9b05c912014-09-12 15:35:31 +05305682
5683 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
5684 panic("%s: Issue occurred with flashing controller firmware."
5685 "Please reboot the system and ensure that the correct"
5686 " firmware version is running\n", ioc->name);
5687
Sreekanth Reddyf92363d2012-11-30 07:44:21 +05305688 r = _base_make_ioc_operational(ioc, sleep_flag);
5689 if (!r)
5690 _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
5691
5692 out:
5693 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
5694 ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
5695
5696 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5697 ioc->ioc_reset_in_progress_status = r;
5698 ioc->shost_recovery = 0;
5699 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5700 ioc->ioc_reset_count++;
5701 mutex_unlock(&ioc->reset_in_progress_mutex);
5702
5703 out_unlocked:
5704 if ((r == 0) && is_trigger) {
5705 if (is_fault)
5706 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
5707 else
5708 mpt3sas_trigger_master(ioc,
5709 MASTER_TRIGGER_ADAPTER_RESET);
5710 }
5711 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5712 __func__));
5713 return r;
5714}
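
/*
 * Illustrative usage sketch (not part of this file): callers elsewhere in
 * the driver, e.g. the SCSI host reset callback in mpt3sas_scsih.c,
 * typically invoke the hard reset as:
 *
 *	mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER);
 *
 * with FORCE_BIG_HAMMER or SOFT_RESET depending on how aggressive the
 * recovery needs to be.
 */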