/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 * Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for the adapter to become operational (default: 300)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
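
/*
 * Example (illustrative only, not part of the driver): the parameters
 * above are read once at module load time, so a typical invocation that
 * caps the bus at U320 and raises the log verbosity might look like:
 *
 *	modprobe ipr max_speed=2 log_level=2
 *
 * The parameter names come from the module_param_named() calls above.
 */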

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
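
/*
 * Note on the function above: the trailing readl() of the sense
 * interrupt register is the usual posted-MMIO-write flush idiom; it
 * forces the mask and clear writes out to the adapter before the
 * caller proceeds, and the value read back is intentionally unused.
 */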

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
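
/*
 * A minimal sketch of the internal-command pattern built from the two
 * helpers above (the request type and opcode shown are illustrative;
 * callers later in this file fill in the real values). The host_lock
 * must be held on entry, since ipr_send_blocking_cmd() drops and
 * reacquires it around the wait:
 *
 *	struct ipr_cmnd *ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *
 *	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 *	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 *	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 */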

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
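
/*
 * Worked example of the CDB length encoding above (the buffer size is
 * illustrative; the real value is sizeof(hostrcb->hcam)): for a
 * 0x0FC8-byte HCAM buffer,
 *
 *	cdb[7] = (0x0FC8 >> 8) & 0xff = 0x0F
 *	cdb[8] =  0x0FC8       & 0xff = 0xC8
 *
 * i.e. the transfer length is stored big-endian across two CDB bytes.
 */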

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 1;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Device %d: missing\n", i + 1);
		} else {
			ipr_err("Device %d: %d:%d:%d:%d\n", i + 1,
				ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus,
				dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun);
		}
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i) {
			ipr_err("Exposed Array Member %d:\n", i);
		} else {
			ipr_err("Array Member %d:\n", i);
		}

		ipr_log_vpd(&array_entry->vpd);

		if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Current Location: unknown\n");
		} else {
			ipr_err("Current Location: %d:%d:%d:%d\n",
				ioa_cfg->host->host_no,
				array_entry->dev_res_addr.bus,
				array_entry->dev_res_addr.target,
				array_entry->dev_res_addr.lun);
		}

		if (array_entry->expected_dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Expected Location: unknown\n");
		} else {
			ipr_err("Expected Location: %d:%d:%d:%d\n",
				ioa_cfg->host->host_no,
				array_entry->expected_dev_res_addr.bus,
				array_entry->expected_dev_res_addr.target,
				array_entry->expected_dev_res_addr.lun);
		}

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	int i;
	int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);

	if (ioa_data_len == 0)
		return;

	ipr_err("IOA Error Data:\n");
	ipr_err("Offset    0 1 2 3  4 5 6 7  8 9 A B  C D E F\n");

	for (i = 0; i < ioa_data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
	}
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function returns the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}
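
/*
 * Illustrative lookup using the function above: an IOASC not present in
 * the table (e.g. a hypothetical 0xDEADBEEF) falls through to index 0, so
 *
 *	ipr_error_table[ipr_get_error(0xDEADBEEF)].error
 *
 * yields "8155: An unknown error was received", while a table hit such
 * as ipr_get_error(0x04448200) indexes the "8150: Permanent IOA
 * failure" entry.
 */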

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 *	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_1:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	default:
		dev_err(&ioa_cfg->pdev->dev,
			"Unknown error received. Overlay ID: %d\n",
			hostrcb->hcam.overlay_id);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, the adapter
	   is now dead and cannot be recovered here. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 *	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
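
/*
 * Worked example for the rate math above: a 16-bit wide bus limited by
 * an SES table entry to 160 MB/s gives (160 * 10) / (16 / 8) = 800,
 * i.e. 80 MHz in the 100 kHz units this function returns; the same
 * 160 MB/s limit on an 8-bit bus would be 1600 (160 MHz).
 */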

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
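
/*
 * The delay in the poll loop above doubles each pass (1, 2, 4, ... us),
 * so the total time spent waiting before giving up is roughly twice
 * max_delay: for example, with max_delay = 10000 us the loop sleeps
 * 1 + 2 + ... + 8192 = 16383 us in the worst case before returning -EIO.
 */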

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
			readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->format = IPR_SDT_FMT2;
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	   lengths to gather the real dump data. sdt represents the pointer
	   to the ioa generated dump table. Dump data will be extracted based
	   on entries in this table */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
			end_off = be32_to_cpu(sdt->entry[i].end_offset);

			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
				bytes_to_copy = end_off - start_off;
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif
1814
1815/**
1816 * ipr_release_dump - Free adapter dump memory
1817 * @kref: kref struct
1818 *
1819 * Return value:
1820 * nothing
1821 **/
1822static void ipr_release_dump(struct kref *kref)
1823{
1824 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
1825 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1826 unsigned long lock_flags = 0;
1827 int i;
1828
1829 ENTER;
1830 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1831 ioa_cfg->dump = NULL;
1832 ioa_cfg->sdt_state = INACTIVE;
1833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1834
1835 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1836 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1837
1838 kfree(dump);
1839 LEAVE;
1840}
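/*
 * Reference-counting sketch for the dump object (illustrative only,
 * mirroring the callers below): anyone who wants to touch the dump
 * outside the host lock takes a reference under the lock and drops it
 * when done, so ipr_release_dump runs only when the last user is gone.
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 *	dump = ioa_cfg->dump;
 *	if (dump)
 *		kref_get(&dump->kref);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 *
 *	... use dump->driver_dump / dump->ioa_dump ...
 *
 *	kref_put(&dump->kref, ipr_release_dump);
 */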
1841
1842/**
1843 * ipr_worker_thread - Worker thread
1844 * @data: ioa config struct
1845 *
1846 * Called at task level from a work thread. This function takes care
1847 * of adding and removing device from the mid-layer as configuration
1848 * changes are detected by the adapter.
1849 *
1850 * Return value:
1851 * nothing
1852 **/
1853static void ipr_worker_thread(void *data)
1854{
1855 unsigned long lock_flags;
1856 struct ipr_resource_entry *res;
1857 struct scsi_device *sdev;
1858 struct ipr_dump *dump;
1859 struct ipr_ioa_cfg *ioa_cfg = data;
1860 u8 bus, target, lun;
1861 int did_work;
1862
1863 ENTER;
1864 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1865
1866 if (ioa_cfg->sdt_state == GET_DUMP) {
1867 dump = ioa_cfg->dump;
1868 if (!dump) {
1869 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1870 return;
1871 }
1872 kref_get(&dump->kref);
1873 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1874 ipr_get_ioa_dump(ioa_cfg, dump);
1875 kref_put(&dump->kref, ipr_release_dump);
1876
1877 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1878 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1879 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1880 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1881 return;
1882 }
1883
1884restart:
1885 do {
1886 did_work = 0;
1887 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1888 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1889 return;
1890 }
1891
1892 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1893 if (res->del_from_ml && res->sdev) {
1894 did_work = 1;
1895 sdev = res->sdev;
1896 if (!scsi_device_get(sdev)) {
1897 res->sdev = NULL;
1898 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1900 scsi_remove_device(sdev);
1901 scsi_device_put(sdev);
1902 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1903 }
1904 break;
1905 }
1906 }
1907 } while(did_work);
1908
1909 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1910 if (res->add_to_ml) {
1911 bus = res->cfgte.res_addr.bus;
1912 target = res->cfgte.res_addr.target;
1913 lun = res->cfgte.res_addr.lun;
1914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1915 scsi_add_device(ioa_cfg->host, bus, target, lun);
1916 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1917 goto restart;
1918 }
1919 }
1920
1921 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1922 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1923 LEAVE;
1924}
1925
1926#ifdef CONFIG_SCSI_IPR_TRACE
1927/**
1928 * ipr_read_trace - Dump the adapter trace
1929 * @kobj: kobject struct
1930 * @buf: buffer
1931 * @off: offset
1932 * @count: buffer size
1933 *
1934 * Return value:
1935 * number of bytes printed to buffer
1936 **/
1937static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1938 loff_t off, size_t count)
1939{
1940 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1941 struct Scsi_Host *shost = class_to_shost(cdev);
1942 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1943 unsigned long lock_flags = 0;
1944 int size = IPR_TRACE_SIZE;
1945 char *src = (char *)ioa_cfg->trace;
1946
1947 if (off > size)
1948 return 0;
1949 if (off + count > size) {
1950 size -= off;
1951 count = size;
1952 }
1953
1954 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1955 memcpy(buf, &src[off], count);
1956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1957 return count;
1958}
1959
1960static struct bin_attribute ipr_trace_attr = {
1961 .attr = {
1962 .name = "trace",
1963 .mode = S_IRUGO,
1964 },
1965 .size = 0,
1966 .read = ipr_read_trace,
1967};
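/*
 * Hypothetical user-space sketch for reading this binary attribute.
 * The sysfs path (host number "host0") is an assumption for
 * illustration; the trace is read like any regular file:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/sys/class/scsi_host/host0/trace", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *		return 0;
 *	}
 */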
1968#endif
1969
1970/**
1971 * ipr_show_fw_version - Show the firmware version
1972 * @class_dev: class device struct
1973 * @buf: buffer
1974 *
1975 * Return value:
1976 * number of bytes printed to buffer
1977 **/
1978static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
1979{
1980 struct Scsi_Host *shost = class_to_shost(class_dev);
1981 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1982 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1983 unsigned long lock_flags = 0;
1984 int len;
1985
1986 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1987 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
1988 ucode_vpd->major_release, ucode_vpd->card_type,
1989 ucode_vpd->minor_release[0],
1990 ucode_vpd->minor_release[1]);
1991 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1992 return len;
1993}
1994
1995static struct class_device_attribute ipr_fw_version_attr = {
1996 .attr = {
1997 .name = "fw_version",
1998 .mode = S_IRUGO,
1999 },
2000 .show = ipr_show_fw_version,
2001};
2002
2003/**
2004 * ipr_show_log_level - Show the adapter's error logging level
2005 * @class_dev: class device struct
2006 * @buf: buffer
2007 *
2008 * Return value:
2009 * number of bytes printed to buffer
2010 **/
2011static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2012{
2013 struct Scsi_Host *shost = class_to_shost(class_dev);
2014 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2015 unsigned long lock_flags = 0;
2016 int len;
2017
2018 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2019 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2020 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2021 return len;
2022}
2023
2024/**
2025 * ipr_store_log_level - Change the adapter's error logging level
2026 * @class_dev: class device struct
2027 * @buf: buffer
2028 *
2029 * Return value:
2030 * number of bytes consumed from the buffer
2031 **/
2032static ssize_t ipr_store_log_level(struct class_device *class_dev,
2033 const char *buf, size_t count)
2034{
2035 struct Scsi_Host *shost = class_to_shost(class_dev);
2036 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2037 unsigned long lock_flags = 0;
2038
2039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2040 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2041 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2042 return strlen(buf);
2043}
2044
2045static struct class_device_attribute ipr_log_level_attr = {
2046 .attr = {
2047 .name = "log_level",
2048 .mode = S_IRUGO | S_IWUSR,
2049 },
2050 .show = ipr_show_log_level,
2051 .store = ipr_store_log_level
2052};
2053
2054/**
2055 * ipr_store_diagnostics - IOA Diagnostics interface
2056 * @class_dev: class_device struct
2057 * @buf: buffer
2058 * @count: buffer size
2059 *
2060 * This function will reset the adapter and wait a reasonable
2061 * amount of time for any errors that the adapter might log.
2062 *
2063 * Return value:
2064 * count on success / other on failure
2065 **/
2066static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2067 const char *buf, size_t count)
2068{
2069 struct Scsi_Host *shost = class_to_shost(class_dev);
2070 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2071 unsigned long lock_flags = 0;
2072 int rc = count;
2073
2074 if (!capable(CAP_SYS_ADMIN))
2075 return -EACCES;
2076
2077 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2078 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2079 ioa_cfg->errors_logged = 0;
2080 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2081
2082 if (ioa_cfg->in_reset_reload) {
2083 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2084 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2085
2086 /* Wait for a second for any errors to be logged */
2087 msleep(1000);
2088 } else {
2089 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2090 return -EIO;
2091 }
2092
2093 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2094 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2095 rc = -EIO;
2096 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2097
2098 return rc;
2099}
2100
2101static struct class_device_attribute ipr_diagnostics_attr = {
2102 .attr = {
2103 .name = "run_diagnostics",
2104 .mode = S_IWUSR,
2105 },
2106 .store = ipr_store_diagnostics
2107};
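/*
 * Usage sketch (host number assumed): writing any value to this
 * attribute kicks off a normal-shutdown reset and reports -EIO if the
 * adapter logged errors while coming back up, e.g.:
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */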
2108
2109/**
2110 * ipr_store_reset_adapter - Reset the adapter
2111 * @class_dev: class_device struct
2112 * @buf: buffer
2113 * @count: buffer size
2114 *
2115 * This function will reset the adapter.
2116 *
2117 * Return value:
2118 * count on success / other on failure
2119 **/
2120static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2121 const char *buf, size_t count)
2122{
2123 struct Scsi_Host *shost = class_to_shost(class_dev);
2124 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2125 unsigned long lock_flags;
2126 int result = count;
2127
2128 if (!capable(CAP_SYS_ADMIN))
2129 return -EACCES;
2130
2131 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2132 if (!ioa_cfg->in_reset_reload)
2133 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2134 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2135 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2136
2137 return result;
2138}
2139
2140static struct class_device_attribute ipr_ioa_reset_attr = {
2141 .attr = {
2142 .name = "reset_host",
2143 .mode = S_IWUSR,
2144 },
2145 .store = ipr_store_reset_adapter
2146};
2147
2148/**
2149 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2150 * @buf_len: buffer length
2151 *
2152 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2153 * list to use for microcode download
2154 *
2155 * Return value:
2156 * pointer to sglist / NULL on failure
2157 **/
2158static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2159{
2160 int sg_size, order, bsize_elem, num_elem, i, j;
2161 struct ipr_sglist *sglist;
2162 struct scatterlist *scatterlist;
2163 struct page *page;
2164
2165 /* Get the minimum size per scatter/gather element */
2166 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2167
2168 /* Get the actual size per element */
2169 order = get_order(sg_size);
2170
2171 /* Determine the actual number of bytes per element */
2172 bsize_elem = PAGE_SIZE * (1 << order);
2173
2174 /* Determine the actual number of sg entries needed */
2175 if (buf_len % bsize_elem)
2176 num_elem = (buf_len / bsize_elem) + 1;
2177 else
2178 num_elem = buf_len / bsize_elem;
2179
2180 /* Allocate a scatter/gather list for the DMA */
2181 sglist = kmalloc(sizeof(struct ipr_sglist) +
2182 (sizeof(struct scatterlist) * (num_elem - 1)),
2183 GFP_KERNEL);
2184
2185 if (sglist == NULL) {
2186 ipr_trace;
2187 return NULL;
2188 }
2189
2190 memset(sglist, 0, sizeof(struct ipr_sglist) +
2191 (sizeof(struct scatterlist) * (num_elem - 1)));
2192
2193 scatterlist = sglist->scatterlist;
2194
2195 sglist->order = order;
2196 sglist->num_sg = num_elem;
2197
2198 /* Allocate a bunch of sg elements */
2199 for (i = 0; i < num_elem; i++) {
2200 page = alloc_pages(GFP_KERNEL, order);
2201 if (!page) {
2202 ipr_trace;
2203
2204 /* Free up what we already allocated */
2205 for (j = i - 1; j >= 0; j--)
2206 __free_pages(scatterlist[j].page, order);
2207 kfree(sglist);
2208 return NULL;
2209 }
2210
2211 scatterlist[i].page = page;
2212 }
2213
2214 return sglist;
2215}
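/*
 * Worked example of the sizing math above, assuming 4K pages and an
 * IPR_MAX_SGLIST of 64 (both illustrative assumptions): for a 1 MB
 * (1048576 byte) image, sg_size = 1048576 / 63 = 16643; get_order()
 * rounds that up to order 3 (8 pages), so bsize_elem = 32768 and the
 * image is carved into num_elem = 32 chunks of 32 KB each.
 */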
2216
2217/**
2218 * ipr_free_ucode_buffer - Frees a microcode download buffer
2219 * @sglist: scatter/gather list pointer
2220 *
2221 * Free a DMA'able ucode download buffer previously allocated with
2222 * ipr_alloc_ucode_buffer
2223 *
2224 * Return value:
2225 * nothing
2226 **/
2227static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2228{
2229 int i;
2230
2231 for (i = 0; i < sglist->num_sg; i++)
2232 __free_pages(sglist->scatterlist[i].page, sglist->order);
2233
2234 kfree(sglist);
2235}
2236
2237/**
2238 * ipr_copy_ucode_buffer - Copy a microcode image into a DMA buffer
2239 * @sglist: scatter/gather list pointer
2240 * @buffer: buffer pointer
2241 * @len: buffer length
2242 *
2243 * Copy a microcode image from the firmware buffer into a DMA-able
2244 * buffer allocated by ipr_alloc_ucode_buffer
2245 *
2246 * Return value:
2247 * 0 on success / other on failure
2248 **/
2249static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2250 u8 *buffer, u32 len)
2251{
2252 int bsize_elem, i, result = 0;
2253 struct scatterlist *scatterlist;
2254 void *kaddr;
2255
2256 /* Determine the actual number of bytes per element */
2257 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2258
2259 scatterlist = sglist->scatterlist;
2260
2261 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2262 kaddr = kmap(scatterlist[i].page);
2263 memcpy(kaddr, buffer, bsize_elem);
2264 kunmap(scatterlist[i].page);
2265
2266 scatterlist[i].length = bsize_elem;
2267
2268 if (result != 0) {
2269 ipr_trace;
2270 return result;
2271 }
2272 }
2273
2274 if (len % bsize_elem) {
2275 kaddr = kmap(scatterlist[i].page);
2276 memcpy(kaddr, buffer, len % bsize_elem);
2277 kunmap(scatterlist[i].page);
2278
2279 scatterlist[i].length = len % bsize_elem;
2280 }
2281
2282 sglist->buffer_len = len;
2283 return result;
2284}
2285
2286/**
2287 * ipr_map_ucode_buffer - Map a microcode download buffer
2288 * @ipr_cmd: ipr command struct
2289 * @sglist: scatter/gather list
2290 * @len: total length of download buffer
2291 *
2292 * Maps a microcode download scatter/gather list for DMA and
2293 * builds the IOADL.
2294 *
2295 * Return value:
2296 * 0 on success / -EIO on failure
2297 **/
2298static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
2299 struct ipr_sglist *sglist, int len)
2300{
2301 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2302 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2303 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2304 struct scatterlist *scatterlist = sglist->scatterlist;
2305 int i;
2306
2307 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
2308 sglist->num_sg, DMA_TO_DEVICE);
2309
2310 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2311 ioarcb->write_data_transfer_length = cpu_to_be32(len);
2312 ioarcb->write_ioadl_len =
2313 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2314
2315 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2316 ioadl[i].flags_and_data_len =
2317 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2318 ioadl[i].address =
2319 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2320 }
2321
2322 if (likely(ipr_cmd->dma_use_sg)) {
2323 ioadl[i-1].flags_and_data_len |=
2324 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2325 }
2326 else {
2327 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
2328 return -EIO;
2329 }
2330
2331 return 0;
2332}
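/*
 * Shape of the resulting IOADL, sketched for a hypothetical
 * three-element mapping: each descriptor carries the write flag OR'd
 * with the DMA length, plus the 32-bit DMA address, and only the final
 * descriptor gets the LAST flag so the adapter knows where the list ends:
 *
 *	ioadl[0] = { WRITE | len0, addr0 }
 *	ioadl[1] = { WRITE | len1, addr1 }
 *	ioadl[2] = { WRITE | len2 | LAST, addr2 }
 */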
2333
2334/**
2335 * ipr_store_update_fw - Update the firmware on the adapter
2336 * @class_dev: class_device struct
2337 * @buf: buffer
2338 * @count: buffer size
2339 *
2340 * This function will update the firmware on the adapter.
2341 *
2342 * Return value:
2343 * count on success / other on failure
2344 **/
2345static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2346 const char *buf, size_t count)
2347{
2348 struct Scsi_Host *shost = class_to_shost(class_dev);
2349 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2350 struct ipr_ucode_image_header *image_hdr;
2351 const struct firmware *fw_entry;
2352 struct ipr_sglist *sglist;
2353 unsigned long lock_flags;
2354 char fname[100];
2355 char *src;
2356 int len, result, dnld_size;
2357
2358 if (!capable(CAP_SYS_ADMIN))
2359 return -EACCES;
2360
2361	len = snprintf(fname, sizeof(fname), "%s", buf);
2362	if (len > 0 && len < (int)sizeof(fname) && fname[len - 1] == '\n')
	fname[len - 1] = '\0';
2363
2364	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2365 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2366 return -EIO;
2367 }
2368
2369 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2370
2371 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2372 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2373 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2374 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2375 release_firmware(fw_entry);
2376 return -EINVAL;
2377 }
2378
2379 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2380 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2381 sglist = ipr_alloc_ucode_buffer(dnld_size);
2382
2383 if (!sglist) {
2384 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2385 release_firmware(fw_entry);
2386 return -ENOMEM;
2387 }
2388
2389 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2390
2391 if (result) {
2392 dev_err(&ioa_cfg->pdev->dev,
2393 "Microcode buffer copy to DMA buffer failed\n");
2394 ipr_free_ucode_buffer(sglist);
2395 release_firmware(fw_entry);
2396 return result;
2397 }
2398
2399 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2400
2401 if (ioa_cfg->ucode_sglist) {
2402 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2403 dev_err(&ioa_cfg->pdev->dev,
2404 "Microcode download already in progress\n");
2405 ipr_free_ucode_buffer(sglist);
2406 release_firmware(fw_entry);
2407 return -EIO;
2408 }
2409
2410 ioa_cfg->ucode_sglist = sglist;
2411 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2412 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2413 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2414
2415 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2416 ioa_cfg->ucode_sglist = NULL;
2417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2418
2419 ipr_free_ucode_buffer(sglist);
2420 release_firmware(fw_entry);
2421
2422 return count;
2423}
2424
2425static struct class_device_attribute ipr_update_fw_attr = {
2426 .attr = {
2427 .name = "update_fw",
2428 .mode = S_IWUSR,
2429 },
2430 .store = ipr_store_update_fw
2431};
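/*
 * Usage sketch (file name and path are assumptions): the written string
 * names a microcode image that request_firmware() must be able to
 * locate, typically via the firmware loader from /lib/firmware, e.g.:
 *
 *	echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The adapter is then reset with a normal shutdown to activate the image.
 */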
2432
2433static struct class_device_attribute *ipr_ioa_attrs[] = {
2434 &ipr_fw_version_attr,
2435 &ipr_log_level_attr,
2436 &ipr_diagnostics_attr,
2437 &ipr_ioa_reset_attr,
2438 &ipr_update_fw_attr,
2439 NULL,
2440};
2441
2442#ifdef CONFIG_SCSI_IPR_DUMP
2443/**
2444 * ipr_read_dump - Dump the adapter
2445 * @kobj: kobject struct
2446 * @buf: buffer
2447 * @off: offset
2448 * @count: buffer size
2449 *
2450 * Return value:
2451 * number of bytes printed to buffer
2452 **/
2453static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2454 loff_t off, size_t count)
2455{
2456 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2457 struct Scsi_Host *shost = class_to_shost(cdev);
2458 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2459 struct ipr_dump *dump;
2460 unsigned long lock_flags = 0;
2461 char *src;
2462 int len;
2463 size_t rc = count;
2464
2465 if (!capable(CAP_SYS_ADMIN))
2466 return -EACCES;
2467
2468 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2469 dump = ioa_cfg->dump;
2470
2471 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2472 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2473 return 0;
2474 }
2475 kref_get(&dump->kref);
2476 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2477
2478 if (off > dump->driver_dump.hdr.len) {
2479 kref_put(&dump->kref, ipr_release_dump);
2480 return 0;
2481 }
2482
2483 if (off + count > dump->driver_dump.hdr.len) {
2484 count = dump->driver_dump.hdr.len - off;
2485 rc = count;
2486 }
2487
2488 if (count && off < sizeof(dump->driver_dump)) {
2489 if (off + count > sizeof(dump->driver_dump))
2490 len = sizeof(dump->driver_dump) - off;
2491 else
2492 len = count;
2493 src = (u8 *)&dump->driver_dump + off;
2494 memcpy(buf, src, len);
2495 buf += len;
2496 off += len;
2497 count -= len;
2498 }
2499
2500 off -= sizeof(dump->driver_dump);
2501
2502 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2503 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2504 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2505 else
2506 len = count;
2507 src = (u8 *)&dump->ioa_dump + off;
2508 memcpy(buf, src, len);
2509 buf += len;
2510 off += len;
2511 count -= len;
2512 }
2513
2514 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2515
2516 while (count) {
2517 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2518 len = PAGE_ALIGN(off) - off;
2519 else
2520 len = count;
2521 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2522 src += off & ~PAGE_MASK;
2523 memcpy(buf, src, len);
2524 buf += len;
2525 off += len;
2526 count -= len;
2527 }
2528
2529 kref_put(&dump->kref, ipr_release_dump);
2530 return rc;
2531}
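/*
 * Offset walk, illustrated: a read at offset off is satisfied from up
 * to three back-to-back regions, with off rebased as each region is
 * passed -- first the driver dump block (sizeof(driver_dump) bytes),
 * then the IOA dump header up to offsetof(struct ipr_ioa_dump,
 * ioa_data), and finally the page array itself, where
 * ioa_data[off >> PAGE_SHIFT] selects the page and (off & ~PAGE_MASK)
 * the byte within it.
 */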
2532
2533/**
2534 * ipr_alloc_dump - Prepare for adapter dump
2535 * @ioa_cfg: ioa config struct
2536 *
2537 * Return value:
2538 * 0 on success / other on failure
2539 **/
2540static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2541{
2542 struct ipr_dump *dump;
2543 unsigned long lock_flags = 0;
2544
2545 ENTER;
2546 dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2547
2548 if (!dump) {
2549 ipr_err("Dump memory allocation failed\n");
2550 return -ENOMEM;
2551 }
2552
2553 memset(dump, 0, sizeof(struct ipr_dump));
2554 kref_init(&dump->kref);
2555 dump->ioa_cfg = ioa_cfg;
2556
2557 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2558
2559 if (INACTIVE != ioa_cfg->sdt_state) {
2560 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2561 kfree(dump);
2562 return 0;
2563 }
2564
2565 ioa_cfg->dump = dump;
2566 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2567 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2568 ioa_cfg->dump_taken = 1;
2569 schedule_work(&ioa_cfg->work_q);
2570 }
2571 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2572
2573 LEAVE;
2574 return 0;
2575}
2576
2577/**
2578 * ipr_free_dump - Free adapter dump memory
2579 * @ioa_cfg: ioa config struct
2580 *
2581 * Return value:
2582 * 0 on success / other on failure
2583 **/
2584static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2585{
2586 struct ipr_dump *dump;
2587 unsigned long lock_flags = 0;
2588
2589 ENTER;
2590
2591 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2592 dump = ioa_cfg->dump;
2593 if (!dump) {
2594 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2595 return 0;
2596 }
2597
2598 ioa_cfg->dump = NULL;
2599 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2600
2601 kref_put(&dump->kref, ipr_release_dump);
2602
2603 LEAVE;
2604 return 0;
2605}
2606
2607/**
2608 * ipr_write_dump - Setup dump state of adapter
2609 * @kobj: kobject struct
2610 * @buf: buffer
2611 * @off: offset
2612 * @count: buffer size
2613 *
2614 * Return value:
2615 * count on success / other on failure
2616 **/
2617static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2618 loff_t off, size_t count)
2619{
2620 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2621 struct Scsi_Host *shost = class_to_shost(cdev);
2622 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2623 int rc;
2624
2625 if (!capable(CAP_SYS_ADMIN))
2626 return -EACCES;
2627
2628 if (buf[0] == '1')
2629 rc = ipr_alloc_dump(ioa_cfg);
2630 else if (buf[0] == '0')
2631 rc = ipr_free_dump(ioa_cfg);
2632 else
2633 return -EINVAL;
2634
2635 if (rc)
2636 return rc;
2637 else
2638 return count;
2639}
2640
2641static struct bin_attribute ipr_dump_attr = {
2642 .attr = {
2643 .name = "dump",
2644 .mode = S_IRUSR | S_IWUSR,
2645 },
2646 .size = 0,
2647 .read = ipr_read_dump,
2648 .write = ipr_write_dump
2649};
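/*
 * Dump life-cycle sketch (host number assumed): write '1' to arm dump
 * collection, read the resulting image back out of the same attribute
 * once the worker thread has obtained it, then write '0' to free the
 * memory:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump
 *	cat /sys/class/scsi_host/host0/dump > ioa_dump.bin
 *	echo 0 > /sys/class/scsi_host/host0/dump
 */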
2650#else
2651static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
2652#endif
2653
2654/**
2655 * ipr_change_queue_depth - Change the device's queue depth
2656 * @sdev: scsi device struct
2657 * @qdepth: depth to set
2658 *
2659 * Return value:
2660 * actual depth set
2661 **/
2662static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
2663{
2664 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2665 return sdev->queue_depth;
2666}
2667
2668/**
2669 * ipr_change_queue_type - Change the device's queue type
2670 * @sdev: scsi device struct
2671 * @tag_type: type of tags to use
2672 *
2673 * Return value:
2674 * actual queue type set
2675 **/
2676static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2677{
2678 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2679 struct ipr_resource_entry *res;
2680 unsigned long lock_flags = 0;
2681
2682 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2683 res = (struct ipr_resource_entry *)sdev->hostdata;
2684
2685 if (res) {
2686 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2687 /*
2688 * We don't bother quiescing the device here since the
2689 * adapter firmware does it for us.
2690 */
2691 scsi_set_tag_type(sdev, tag_type);
2692
2693 if (tag_type)
2694 scsi_activate_tcq(sdev, sdev->queue_depth);
2695 else
2696 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2697 } else
2698 tag_type = 0;
2699 } else
2700 tag_type = 0;
2701
2702 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2703 return tag_type;
2704}
2705
2706/**
2707 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2708 * @dev: device struct
2709 * @buf: buffer
2710 *
2711 * Return value:
2712 * number of bytes printed to buffer
2713 **/
2714static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
2715{
2716 struct scsi_device *sdev = to_scsi_device(dev);
2717 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2718 struct ipr_resource_entry *res;
2719 unsigned long lock_flags = 0;
2720 ssize_t len = -ENXIO;
2721
2722 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2723 res = (struct ipr_resource_entry *)sdev->hostdata;
2724 if (res)
2725 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2726 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2727 return len;
2728}
2729
2730static struct device_attribute ipr_adapter_handle_attr = {
2731 .attr = {
2732 .name = "adapter_handle",
2733 .mode = S_IRUSR,
2734 },
2735 .show = ipr_show_adapter_handle
2736};
2737
2738static struct device_attribute *ipr_dev_attrs[] = {
2739 &ipr_adapter_handle_attr,
2740 NULL,
2741};
2742
2743/**
2744 * ipr_biosparam - Return the HSC mapping
2745 * @sdev: scsi device struct
2746 * @block_device: block device pointer
2747 * @capacity: capacity of the device
2748 * @parm: Array containing returned HSC values.
2749 *
2750 * This function generates the HSC parms that fdisk uses.
2751 * We want to make sure we return something that places partitions
2752 * on 4k boundaries for best performance with the IOA.
2753 *
2754 * Return value:
2755 * 0 on success
2756 **/
2757static int ipr_biosparam(struct scsi_device *sdev,
2758 struct block_device *block_device,
2759 sector_t capacity, int *parm)
2760{
2761 int heads, sectors;
2762 sector_t cylinders;
2763
2764 heads = 128;
2765 sectors = 32;
2766
2767 cylinders = capacity;
2768 sector_div(cylinders, (128 * 32));
2769
2770 /* return result */
2771 parm[0] = heads;
2772 parm[1] = sectors;
2773 parm[2] = cylinders;
2774
2775 return 0;
2776}
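/*
 * Worked example of the geometry above: with 128 heads and 32 sectors
 * per track, a cylinder is 128 * 32 = 4096 sectors (2 MB at 512 bytes
 * per sector), so cylinder-aligned partitions are always 4K-aligned.
 * A hypothetical 8388608-sector (4 GB) device reports 2048 cylinders.
 */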
2777
2778/**
2779 * ipr_slave_destroy - Unconfigure a SCSI device
2780 * @sdev: scsi device struct
2781 *
2782 * Return value:
2783 * nothing
2784 **/
2785static void ipr_slave_destroy(struct scsi_device *sdev)
2786{
2787 struct ipr_resource_entry *res;
2788 struct ipr_ioa_cfg *ioa_cfg;
2789 unsigned long lock_flags = 0;
2790
2791 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2792
2793 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2794 res = (struct ipr_resource_entry *) sdev->hostdata;
2795 if (res) {
2796 sdev->hostdata = NULL;
2797 res->sdev = NULL;
2798 }
2799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2800}
2801
2802/**
2803 * ipr_slave_configure - Configure a SCSI device
2804 * @sdev: scsi device struct
2805 *
2806 * This function configures the specified scsi device.
2807 *
2808 * Return value:
2809 * 0 on success
2810 **/
2811static int ipr_slave_configure(struct scsi_device *sdev)
2812{
2813 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2814 struct ipr_resource_entry *res;
2815 unsigned long lock_flags = 0;
2816
2817 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2818 res = sdev->hostdata;
2819 if (res) {
2820 if (ipr_is_af_dasd_device(res))
2821 sdev->type = TYPE_RAID;
2822 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res))
2823 sdev->scsi_level = 4;
2824 if (ipr_is_vset_device(res)) {
2825 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2826 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
2827 }
2828 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2829 sdev->allow_restart = 1;
2830 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
2831 }
2832 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2833 return 0;
2834}
2835
2836/**
2837 * ipr_slave_alloc - Prepare for commands to a device.
2838 * @sdev: scsi device struct
2839 *
2840 * This function saves a pointer to the resource entry
2841 * in the scsi device struct if the device exists. We
2842 * can then use this pointer in ipr_queuecommand when
2843 * handling new commands.
2844 *
2845 * Return value:
2846 * 0 on success
2847 **/
2848static int ipr_slave_alloc(struct scsi_device *sdev)
2849{
2850 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2851 struct ipr_resource_entry *res;
2852 unsigned long lock_flags;
2853
2854 sdev->hostdata = NULL;
2855
2856 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2857
2858 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2859 if ((res->cfgte.res_addr.bus == sdev->channel) &&
2860 (res->cfgte.res_addr.target == sdev->id) &&
2861 (res->cfgte.res_addr.lun == sdev->lun)) {
2862 res->sdev = sdev;
2863 res->add_to_ml = 0;
2864 res->in_erp = 0;
2865 sdev->hostdata = res;
2866 res->needs_sync_complete = 1;
2867 break;
2868 }
2869 }
2870
2871 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2872
2873 return 0;
2874}
2875
2876/**
2877 * ipr_eh_host_reset - Reset the host adapter
2878 * @scsi_cmd: scsi command struct
2879 *
2880 * Return value:
2881 * SUCCESS / FAILED
2882 **/
2883static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
2884{
2885 struct ipr_ioa_cfg *ioa_cfg;
2886 int rc;
2887
2888 ENTER;
2889 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2890
2891 dev_err(&ioa_cfg->pdev->dev,
2892 "Adapter being reset as a result of error recovery.\n");
2893
2894 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2895 ioa_cfg->sdt_state = GET_DUMP;
2896
2897 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2898
2899 LEAVE;
2900 return rc;
2901}
2902
2903static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
2904{
2905 int rc;
2906
2907 spin_lock_irq(cmd->device->host->host_lock);
2908 rc = __ipr_eh_host_reset(cmd);
2909 spin_unlock_irq(cmd->device->host->host_lock);
2910
2911 return rc;
2912}
2913
2914/**
2915 * ipr_eh_dev_reset - Reset the device
2916 * @scsi_cmd: scsi command struct
2917 *
2918 * This function issues a device reset to the affected device.
2919 * A LUN reset will be sent to the device first. If that does
2920 * not work, a target reset will be sent.
2921 *
2922 * Return value:
2923 * SUCCESS / FAILED
2924 **/
2925static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
2926{
2927 struct ipr_cmnd *ipr_cmd;
2928 struct ipr_ioa_cfg *ioa_cfg;
2929 struct ipr_resource_entry *res;
2930 struct ipr_cmd_pkt *cmd_pkt;
2931 u32 ioasc;
2932
2933 ENTER;
2934 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2935 res = scsi_cmd->device->hostdata;
2936
2937 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
2938 return FAILED;
2939
2940 /*
2941 * If we are currently going through reset/reload, return failed. This will force the
2942 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
2943 * reset to complete
2944 */
2945 if (ioa_cfg->in_reset_reload)
2946 return FAILED;
2947 if (ioa_cfg->ioa_is_dead)
2948 return FAILED;
2949
2950 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
2951 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
2952 if (ipr_cmd->scsi_cmd)
2953 ipr_cmd->done = ipr_scsi_eh_done;
2954 }
2955 }
2956
2957 res->resetting_device = 1;
2958
2959 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
2960
2961 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
2962 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
2963 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
2964 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
2965
2966 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
2967 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
2968
2969 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2970
2971 res->resetting_device = 0;
2972
2973 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2974
2975 LEAVE;
2976 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
2977}
2978
2979static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
2980{
2981 int rc;
2982
2983 spin_lock_irq(cmd->device->host->host_lock);
2984 rc = __ipr_eh_dev_reset(cmd);
2985 spin_unlock_irq(cmd->device->host->host_lock);
2986
2987 return rc;
2988}
2989
2990/**
2991 * ipr_bus_reset_done - Op done function for bus reset.
2992 * @ipr_cmd: ipr command struct
2993 *
2994 * This function is the op done function for a bus reset
2995 *
2996 * Return value:
2997 * none
2998 **/
2999static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3000{
3001 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3002 struct ipr_resource_entry *res;
3003
3004 ENTER;
3005 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3006 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3007 sizeof(res->cfgte.res_handle))) {
3008 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3009 break;
3010 }
3011 }
3012
3013 /*
3014 * If abort has not completed, indicate the reset has, else call the
3015 * abort's done function to wake the sleeping eh thread
3016 */
3017 if (ipr_cmd->sibling->sibling)
3018 ipr_cmd->sibling->sibling = NULL;
3019 else
3020 ipr_cmd->sibling->done(ipr_cmd->sibling);
3021
3022 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3023 LEAVE;
3024}
3025
3026/**
3027 * ipr_abort_timeout - An abort task has timed out
3028 * @ipr_cmd: ipr command struct
3029 *
3030 * This function handles when an abort task times out. If this
3031 * happens we issue a bus reset since we have resources tied
3032 * up that must be freed before returning to the midlayer.
3033 *
3034 * Return value:
3035 * none
3036 **/
3037static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3038{
3039 struct ipr_cmnd *reset_cmd;
3040 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3041 struct ipr_cmd_pkt *cmd_pkt;
3042 unsigned long lock_flags = 0;
3043
3044 ENTER;
3045 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3046 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3048 return;
3049 }
3050
3051 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3052 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3053 ipr_cmd->sibling = reset_cmd;
3054 reset_cmd->sibling = ipr_cmd;
3055 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3056 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3057 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3058 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3059 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3060
3061 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3063 LEAVE;
3064}
3065
3066/**
3067 * ipr_cancel_op - Cancel specified op
3068 * @scsi_cmd: scsi command struct
3069 *
3070 * This function cancels specified op.
3071 *
3072 * Return value:
3073 * SUCCESS / FAILED
3074 **/
3075static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3076{
3077 struct ipr_cmnd *ipr_cmd;
3078 struct ipr_ioa_cfg *ioa_cfg;
3079 struct ipr_resource_entry *res;
3080 struct ipr_cmd_pkt *cmd_pkt;
3081 u32 ioasc;
3082 int op_found = 0;
3083
3084 ENTER;
3085 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3086 res = scsi_cmd->device->hostdata;
3087
3088	/* If we are currently going through reset/reload, return failed.
3089 * This will force the mid-layer to call ipr_eh_host_reset,
3090 * which will then go to sleep and wait for the reset to complete
3091 */
3092 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3093 return FAILED;
3094	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3095 return FAILED;
3096
3097 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3098 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3099 ipr_cmd->done = ipr_scsi_eh_done;
3100 op_found = 1;
3101 break;
3102 }
3103 }
3104
3105 if (!op_found)
3106 return SUCCESS;
3107
3108 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3109 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3110 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3111 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3112 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3113 ipr_cmd->u.sdev = scsi_cmd->device;
3114
3115 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3116 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3117 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3118
3119 /*
3120 * If the abort task timed out and we sent a bus reset, we will get
3121 * one of the following responses to the abort
3122 */
3123 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3124 ioasc = 0;
3125 ipr_trace;
3126 }
3127
3128 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3129 res->needs_sync_complete = 1;
3130
3131 LEAVE;
3132 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3133}
3134
3135/**
3136 * ipr_eh_abort - Abort a single op
3137 * @scsi_cmd: scsi command struct
3138 *
3139 * Return value:
3140 * SUCCESS / FAILED
3141 **/
3142static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3143{
3144	unsigned long flags;
3145	int rc;
3146
3147	ENTER;
3148
3149	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3150	rc = ipr_cancel_op(scsi_cmd);
3151	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3152
3153	LEAVE;
3154	return rc;
3155}
3156
3157/**
3158 * ipr_handle_other_interrupt - Handle "other" interrupts
3159 * @ioa_cfg: ioa config struct
3160 * @int_reg: interrupt register
3161 *
3162 * Return value:
3163 * IRQ_NONE / IRQ_HANDLED
3164 **/
3165static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3166 volatile u32 int_reg)
3167{
3168 irqreturn_t rc = IRQ_HANDLED;
3169
3170 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3171 /* Mask the interrupt */
3172 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3173
3174 /* Clear the interrupt */
3175 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3176 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3177
3178 list_del(&ioa_cfg->reset_cmd->queue);
3179 del_timer(&ioa_cfg->reset_cmd->timer);
3180 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3181 } else {
3182 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3183 ioa_cfg->ioa_unit_checked = 1;
3184 else
3185 dev_err(&ioa_cfg->pdev->dev,
3186 "Permanent IOA failure. 0x%08X\n", int_reg);
3187
3188 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3189 ioa_cfg->sdt_state = GET_DUMP;
3190
3191 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3192 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3193 }
3194
3195 return rc;
3196}
3197
3198/**
3199 * ipr_isr - Interrupt service routine
3200 * @irq: irq number
3201 * @devp: pointer to ioa config struct
3202 * @regs: pt_regs struct
3203 *
3204 * Return value:
3205 * IRQ_NONE / IRQ_HANDLED
3206 **/
3207static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3208{
3209 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3210 unsigned long lock_flags = 0;
3211 volatile u32 int_reg, int_mask_reg;
3212 u32 ioasc;
3213 u16 cmd_index;
3214 struct ipr_cmnd *ipr_cmd;
3215 irqreturn_t rc = IRQ_NONE;
3216
3217 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3218
3219 /* If interrupts are disabled, ignore the interrupt */
3220 if (!ioa_cfg->allow_interrupts) {
3221 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3222 return IRQ_NONE;
3223 }
3224
3225 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3226 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3227
3228 /* If an interrupt on the adapter did not occur, ignore it */
3229 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3230 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231 return IRQ_NONE;
3232 }
3233
3234 while (1) {
3235 ipr_cmd = NULL;
3236
3237 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3238 ioa_cfg->toggle_bit) {
3239
3240 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3241 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3242
3243 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3244 ioa_cfg->errors_logged++;
3245 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3246
3247 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3248 ioa_cfg->sdt_state = GET_DUMP;
3249
3250 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3251 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3252 return IRQ_HANDLED;
3253 }
3254
3255 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3256
3257 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3258
3259 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3260
3261 list_del(&ipr_cmd->queue);
3262 del_timer(&ipr_cmd->timer);
3263 ipr_cmd->done(ipr_cmd);
3264
3265 rc = IRQ_HANDLED;
3266
3267 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3268 ioa_cfg->hrrq_curr++;
3269 } else {
3270 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3271 ioa_cfg->toggle_bit ^= 1u;
3272 }
3273 }
3274
3275 if (ipr_cmd != NULL) {
3276 /* Clear the PCI interrupt */
3277 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3278 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3279 } else
3280 break;
3281 }
3282
3283 if (unlikely(rc == IRQ_NONE))
3284 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3285
3286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3287 return rc;
3288}
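/*
 * HRRQ consumption, illustrated: the host walks a circular queue of
 * response handles, and an entry belongs to the current pass only if
 * its toggle bit matches ioa_cfg->toggle_bit. For a hypothetical
 * four-entry ring, pass one consumes entries written with the bit set;
 * the wrap from hrrq_end back to hrrq_start flips toggle_bit, and pass
 * two then consumes entries with the bit clear -- so stale entries from
 * the previous lap never match and the loop stops at the true tail.
 */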
3289
3290/**
3291 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3292 * @ioa_cfg: ioa config struct
3293 * @ipr_cmd: ipr command struct
3294 *
3295 * Return value:
3296 * 0 on success / -1 on failure
3297 **/
3298static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3299 struct ipr_cmnd *ipr_cmd)
3300{
3301 int i;
3302 struct scatterlist *sglist;
3303 u32 length;
3304 u32 ioadl_flags = 0;
3305 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3306 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3307 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3308
3309 length = scsi_cmd->request_bufflen;
3310
3311 if (length == 0)
3312 return 0;
3313
3314 if (scsi_cmd->use_sg) {
3315 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3316 scsi_cmd->request_buffer,
3317 scsi_cmd->use_sg,
3318 scsi_cmd->sc_data_direction);
3319
3320 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3321 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3322 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3323 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3324 ioarcb->write_ioadl_len =
3325 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3326 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3327 ioadl_flags = IPR_IOADL_FLAGS_READ;
3328 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3329 ioarcb->read_ioadl_len =
3330 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3331 }
3332
3333 sglist = scsi_cmd->request_buffer;
3334
3335 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3336 ioadl[i].flags_and_data_len =
3337 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3338 ioadl[i].address =
3339 cpu_to_be32(sg_dma_address(&sglist[i]));
3340 }
3341
3342 if (likely(ipr_cmd->dma_use_sg)) {
3343 ioadl[i-1].flags_and_data_len |=
3344 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3345 return 0;
3346 } else
3347 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3348 } else {
3349 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3350 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3351 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3352 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3353 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3354 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3355 ioadl_flags = IPR_IOADL_FLAGS_READ;
3356 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3357 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3358 }
3359
3360 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3361 scsi_cmd->request_buffer, length,
3362 scsi_cmd->sc_data_direction);
3363
3364 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3365 ipr_cmd->dma_use_sg = 1;
3366 ioadl[0].flags_and_data_len =
3367 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3368 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3369 return 0;
3370 } else
3371 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3372 }
3373
3374 return -1;
3375}
3376
3377/**
3378 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3379 * @scsi_cmd: scsi command struct
3380 *
3381 * Return value:
3382 * task attributes
3383 **/
3384static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3385{
3386 u8 tag[2];
3387 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3388
3389 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3390 switch (tag[0]) {
3391 case MSG_SIMPLE_TAG:
3392 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3393 break;
3394 case MSG_HEAD_TAG:
3395 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3396 break;
3397 case MSG_ORDERED_TAG:
3398 rc = IPR_FLAGS_LO_ORDERED_TASK;
3399 break;
3400		}
3401 }
3402
3403 return rc;
3404}
3405
3406/**
3407 * ipr_erp_done - Process completion of ERP for a device
3408 * @ipr_cmd: ipr command struct
3409 *
3410 * This function copies the sense buffer into the scsi_cmd
3411 * struct and pushes the scsi_done function.
3412 *
3413 * Return value:
3414 * nothing
3415 **/
3416static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3417{
3418 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3419 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3421 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3422
3423 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3424 scsi_cmd->result |= (DID_ERROR << 16);
3425 ipr_sdev_err(scsi_cmd->device,
3426 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3427 } else {
3428 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3429 SCSI_SENSE_BUFFERSIZE);
3430 }
3431
3432 if (res) {
3433 res->needs_sync_complete = 1;
3434 res->in_erp = 0;
3435 }
3436 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3437 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3438 scsi_cmd->scsi_done(scsi_cmd);
3439}
3440
3441/**
3442 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3443 * @ipr_cmd: ipr command struct
3444 *
3445 * Return value:
3446 * none
3447 **/
3448static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3449{
3450 struct ipr_ioarcb *ioarcb;
3451 struct ipr_ioasa *ioasa;
3452
3453 ioarcb = &ipr_cmd->ioarcb;
3454 ioasa = &ipr_cmd->ioasa;
3455
3456 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3457 ioarcb->write_data_transfer_length = 0;
3458 ioarcb->read_data_transfer_length = 0;
3459 ioarcb->write_ioadl_len = 0;
3460 ioarcb->read_ioadl_len = 0;
3461 ioasa->ioasc = 0;
3462 ioasa->residual_data_len = 0;
3463}
3464
3465/**
3466 * ipr_erp_request_sense - Send request sense to a device
3467 * @ipr_cmd: ipr command struct
3468 *
3469 * This function sends a request sense to a device as a result
3470 * of a check condition.
3471 *
3472 * Return value:
3473 * nothing
3474 **/
3475static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3476{
3477 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3478 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3479
3480 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3481 ipr_erp_done(ipr_cmd);
3482 return;
3483 }
3484
3485 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3486
3487 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3488 cmd_pkt->cdb[0] = REQUEST_SENSE;
3489 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3490 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3491 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3492 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3493
3494 ipr_cmd->ioadl[0].flags_and_data_len =
3495 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3496 ipr_cmd->ioadl[0].address =
3497 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3498
3499 ipr_cmd->ioarcb.read_ioadl_len =
3500 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3501 ipr_cmd->ioarcb.read_data_transfer_length =
3502 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3503
3504 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3505 IPR_REQUEST_SENSE_TIMEOUT * 2);
3506}
3507
3508/**
3509 * ipr_erp_cancel_all - Send cancel all to a device
3510 * @ipr_cmd: ipr command struct
3511 *
3512 * This function sends a cancel all to a device to clear the
3513 * queue. If we are running TCQ on the device, QERR is set to 1,
3514 * which means all outstanding ops have been dropped on the floor.
3515 * Cancel all will return them to us.
3516 *
3517 * Return value:
3518 * nothing
3519 **/
3520static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3521{
3522 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3523 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3524 struct ipr_cmd_pkt *cmd_pkt;
3525
3526 res->in_erp = 1;
3527
3528 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3529
3530 if (!scsi_get_tag_type(scsi_cmd->device)) {
3531 ipr_erp_request_sense(ipr_cmd);
3532 return;
3533 }
3534
3535 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3536 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3537 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3538
3539 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3540 IPR_CANCEL_ALL_TIMEOUT);
3541}
3542
3543/**
3544 * ipr_dump_ioasa - Dump contents of IOASA
3545 * @ioa_cfg: ioa config struct
3546 * @ipr_cmd: ipr command struct
3547 *
3548 * This function is invoked by the interrupt handler when ops
3549 * fail. It will log the IOASA if appropriate. Only called
3550 * for GPDD ops.
3551 *
3552 * Return value:
3553 * none
3554 **/
3555static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3556 struct ipr_cmnd *ipr_cmd)
3557{
3558 int i;
3559 u16 data_len;
3560 u32 ioasc;
3561 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3562 __be32 *ioasa_data = (__be32 *)ioasa;
3563 int error_index;
3564
3565 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3566
3567 if (0 == ioasc)
3568 return;
3569
3570 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3571 return;
3572
3573 error_index = ipr_get_error(ioasc);
3574
3575 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3576 /* Don't log an error if the IOA already logged one */
3577 if (ioasa->ilid != 0)
3578 return;
3579
3580 if (ipr_error_table[error_index].log_ioasa == 0)
3581 return;
3582 }
3583
3584 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3585 ipr_error_table[error_index].error);
3586
3587 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3588 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3589 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3590 "Device End state: %s Phase: %s\n",
3591 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3592 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3593 }
3594
3595 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3596 data_len = sizeof(struct ipr_ioasa);
3597 else
3598 data_len = be16_to_cpu(ioasa->ret_stat_len);
3599
3600 ipr_err("IOASA Dump:\n");
3601
3602 for (i = 0; i < data_len / 4; i += 4) {
3603 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3604 be32_to_cpu(ioasa_data[i]),
3605 be32_to_cpu(ioasa_data[i+1]),
3606 be32_to_cpu(ioasa_data[i+2]),
3607 be32_to_cpu(ioasa_data[i+3]));
3608 }
3609}
3610
3611/**
3612 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3613 * @ipr_cmd: ipr command struct
3615 *
3616 * Return value:
3617 * none
3618 **/
3619static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3620{
3621 u32 failing_lba;
3622 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3623 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3624 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3625 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3626
3627 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3628
3629 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3630 return;
3631
3632 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3633
3634 if (ipr_is_vset_device(res) &&
3635 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3636 ioasa->u.vset.failing_lba_hi != 0) {
3637 sense_buf[0] = 0x72;
3638 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3639 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3640 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3641
3642 sense_buf[7] = 12;
3643 sense_buf[8] = 0;
3644 sense_buf[9] = 0x0A;
3645 sense_buf[10] = 0x80;
3646
3647 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3648
3649 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3650 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3651 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3652 sense_buf[15] = failing_lba & 0x000000ff;
3653
3654 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3655
3656 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3657 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3658 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3659 sense_buf[19] = failing_lba & 0x000000ff;
3660 } else {
3661 sense_buf[0] = 0x70;
3662 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3663 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3664 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3665
3666 /* Illegal request */
3667 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3668 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3669 sense_buf[7] = 10; /* additional length */
3670
3671 /* IOARCB was in error */
3672 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3673 sense_buf[15] = 0xC0;
3674 else /* Parameter data was invalid */
3675 sense_buf[15] = 0x80;
3676
3677 sense_buf[16] =
3678 ((IPR_FIELD_POINTER_MASK &
3679 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3680 sense_buf[17] =
3681 (IPR_FIELD_POINTER_MASK &
3682 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3683 } else {
3684 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3685 if (ipr_is_vset_device(res))
3686 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3687 else
3688 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3689
3690 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3691 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3692 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3693 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3694 sense_buf[6] = failing_lba & 0x000000ff;
3695 }
3696
3697 sense_buf[7] = 6; /* additional length */
3698 }
3699 }
3700}
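/*
 * Illustrative sketch (not part of the driver): the descriptor-format
 * (0x72) sense data built above carries the 64-bit failing LBA in an
 * information descriptor. Assuming the same byte layout ipr_gen_sense()
 * uses, a helper packing any 64-bit LBA could look like:
 *
 *	static void pack_info_descriptor(u8 *sense_buf, u64 lba)
 *	{
 *		int i;
 *
 *		sense_buf[7] = 12;	// additional sense length
 *		sense_buf[8] = 0;	// descriptor type: information
 *		sense_buf[9] = 0x0A;	// additional descriptor length
 *		sense_buf[10] = 0x80;	// VALID bit
 *		for (i = 0; i < 8; i++)	// big-endian 64-bit LBA
 *			sense_buf[12 + i] = (lba >> (56 - 8 * i)) & 0xff;
 *	}
 *
 * ipr_gen_sense() open-codes the same packing in two 32-bit halves
 * because the IOASA reports failing_lba_hi and failing_lba_lo
 * separately.
 */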
3701
3702/**
3703 * ipr_erp_start - Process an error response for a SCSI op
3704 * @ioa_cfg: ioa config struct
3705 * @ipr_cmd: ipr command struct
3706 *
3707 * This function determines whether or not to initiate ERP
3708 * on the affected device.
3709 *
3710 * Return value:
3711 * nothing
3712 **/
3713static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3714 struct ipr_cmnd *ipr_cmd)
3715{
3716 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3717 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3718 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3719
3720 if (!res) {
3721 ipr_scsi_eh_done(ipr_cmd);
3722 return;
3723 }
3724
3725 if (ipr_is_gscsi(res))
3726 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3727 else
3728 ipr_gen_sense(ipr_cmd);
3729
3730 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3731 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3732 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3733 break;
3734 case IPR_IOASC_IR_RESOURCE_HANDLE:
3735 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3736 break;
3737 case IPR_IOASC_HW_SEL_TIMEOUT:
3738 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3739 res->needs_sync_complete = 1;
3740 break;
3741 case IPR_IOASC_SYNC_REQUIRED:
3742 if (!res->in_erp)
3743 res->needs_sync_complete = 1;
3744 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3745 break;
3746 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3747 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3748 break;
3749 case IPR_IOASC_BUS_WAS_RESET:
3750 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3751 /*
3752 * Report the bus reset and ask for a retry. The device
3753 * will give CC/UA the next command.
3754 */
3755 if (!res->resetting_device)
3756 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3757 scsi_cmd->result |= (DID_ERROR << 16);
3758 res->needs_sync_complete = 1;
3759 break;
3760 case IPR_IOASC_HW_DEV_BUS_STATUS:
3761 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
3762 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3763 ipr_erp_cancel_all(ipr_cmd);
3764 return;
3765 }
3766 res->needs_sync_complete = 1;
3767 break;
3768 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3769 break;
3770 default:
3771 scsi_cmd->result |= (DID_ERROR << 16);
3772 if (!ipr_is_vset_device(res))
3773 res->needs_sync_complete = 1;
3774 break;
3775 }
3776
3777 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3778 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3779 scsi_cmd->scsi_done(scsi_cmd);
3780}
3781
3782/**
3783 * ipr_scsi_done - mid-layer done function
3784 * @ipr_cmd: ipr command struct
3785 *
3786 * This function is invoked by the interrupt handler for
3787 * ops generated by the SCSI mid-layer
3788 *
3789 * Return value:
3790 * none
3791 **/
3792static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3793{
3794 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3795 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3796 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3797
3798 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
3799
3800 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3801 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3802 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3803 scsi_cmd->scsi_done(scsi_cmd);
3804 } else
3805 ipr_erp_start(ioa_cfg, ipr_cmd);
3806}
3807
3808/**
3809 * ipr_save_ioafp_mode_select - Save adapters mode select data
3810 * @ioa_cfg: ioa config struct
3811 * @scsi_cmd: scsi command struct
3812 *
3813 * This function saves mode select data for the adapter to
3814 * use following an adapter reset.
3815 *
3816 * Return value:
3817 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3818 **/
3819static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3820 struct scsi_cmnd *scsi_cmd)
3821{
3822 if (!ioa_cfg->saved_mode_pages) {
3823 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
3824 GFP_ATOMIC);
3825 if (!ioa_cfg->saved_mode_pages) {
3826 dev_err(&ioa_cfg->pdev->dev,
3827 "IOA mode select buffer allocation failed\n");
3828 return SCSI_MLQUEUE_HOST_BUSY;
3829 }
3830 }
3831
3832 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3833 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3834 return 0;
3835}
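/*
 * Note (illustrative): for the MODE SELECT(6) CDB intercepted here,
 * cmnd[4] is the parameter list length, so the copy above captures
 * exactly the mode pages the caller supplied. They are replayed by
 * ipr_ioafp_mode_select_page28() when the adapter is reinitialized
 * after a reset.
 */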
3836
3837/**
3838 * ipr_queuecommand - Queue a mid-layer request
3839 * @scsi_cmd: scsi command struct
3840 * @done: done function
3841 *
3842 * This function queues a request generated by the mid-layer.
3843 *
3844 * Return value:
3845 * 0 on success
3846 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3847 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3848 **/
3849static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
3850 void (*done) (struct scsi_cmnd *))
3851{
3852 struct ipr_ioa_cfg *ioa_cfg;
3853 struct ipr_resource_entry *res;
3854 struct ipr_ioarcb *ioarcb;
3855 struct ipr_cmnd *ipr_cmd;
3856 int rc = 0;
3857
3858 scsi_cmd->scsi_done = done;
3859 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3860 res = scsi_cmd->device->hostdata;
3861 scsi_cmd->result = (DID_OK << 16);
3862
3863 /*
3864	 * We are currently blocking all devices due to a host reset.
3865 * We have told the host to stop giving us new requests, but
3866 * ERP ops don't count. FIXME
3867 */
3868 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
3869 return SCSI_MLQUEUE_HOST_BUSY;
3870
3871 /*
3872 * FIXME - Create scsi_set_host_offline interface
3873 * and the ioa_is_dead check can be removed
3874 */
3875 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
3876 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3877 scsi_cmd->result = (DID_NO_CONNECT << 16);
3878 scsi_cmd->scsi_done(scsi_cmd);
3879 return 0;
3880 }
3881
3882 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3883 ioarcb = &ipr_cmd->ioarcb;
3884 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
3885
3886 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3887 ipr_cmd->scsi_cmd = scsi_cmd;
3888 ioarcb->res_handle = res->cfgte.res_handle;
3889 ipr_cmd->done = ipr_scsi_done;
3890 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
3891
3892 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
3893 if (scsi_cmd->underflow == 0)
3894 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3895
3896 if (res->needs_sync_complete) {
3897 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
3898 res->needs_sync_complete = 0;
3899 }
3900
3901 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
3902 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
3903 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
3904 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
3905 }
3906
3907 if (scsi_cmd->cmnd[0] >= 0xC0 &&
3908 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
3909 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
3910
3911 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
3912 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
3913
3914 if (likely(rc == 0))
3915 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
3916
3917 if (likely(rc == 0)) {
3918 mb();
3919 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
3920 ioa_cfg->regs.ioarrin_reg);
3921 } else {
3922 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3923 return SCSI_MLQUEUE_HOST_BUSY;
3924 }
3925
3926 return 0;
3927}
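/*
 * Illustrative note: the mb() before the writel() above orders the
 * CPU's stores to the IOARCB (built in coherent DMA memory) ahead of
 * the MMIO doorbell write, so the adapter never fetches a half-built
 * command. A minimal sketch of the same pattern, assuming a
 * hypothetical device with a DOORBELL register:
 *
 *	desc->len  = cpu_to_be32(len);     // fill descriptor in DMA memory
 *	desc->addr = cpu_to_be32(addr);
 *	mb();                              // descriptor visible before...
 *	writel(desc_dma, regs + DOORBELL); // ...the device is told about it
 */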
3928
3929/**
3930 * ipr_ioa_info - Get information about the card/driver
3931 * @host: scsi host struct
3932 *
3933 * Return value:
3934 * pointer to buffer with description string
3935 **/
3936static const char * ipr_ioa_info(struct Scsi_Host *host)
3937{
3938 static char buffer[512];
3939 struct ipr_ioa_cfg *ioa_cfg;
3940 unsigned long lock_flags = 0;
3941
3942 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
3943
3944 spin_lock_irqsave(host->host_lock, lock_flags);
3945 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
3946 spin_unlock_irqrestore(host->host_lock, lock_flags);
3947
3948 return buffer;
3949}
3950
3951static struct scsi_host_template driver_template = {
3952 .module = THIS_MODULE,
3953 .name = "IPR",
3954 .info = ipr_ioa_info,
3955 .queuecommand = ipr_queuecommand,
3956 .eh_abort_handler = ipr_eh_abort,
3957 .eh_device_reset_handler = ipr_eh_dev_reset,
3958 .eh_host_reset_handler = ipr_eh_host_reset,
3959 .slave_alloc = ipr_slave_alloc,
3960 .slave_configure = ipr_slave_configure,
3961 .slave_destroy = ipr_slave_destroy,
3962 .change_queue_depth = ipr_change_queue_depth,
3963 .change_queue_type = ipr_change_queue_type,
3964 .bios_param = ipr_biosparam,
3965 .can_queue = IPR_MAX_COMMANDS,
3966 .this_id = -1,
3967 .sg_tablesize = IPR_MAX_SGLIST,
3968 .max_sectors = IPR_IOA_MAX_SECTORS,
3969 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
3970 .use_clustering = ENABLE_CLUSTERING,
3971 .shost_attrs = ipr_ioa_attrs,
3972 .sdev_attrs = ipr_dev_attrs,
3973 .proc_name = IPR_NAME
3974};
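/*
 * Illustrative sketch of how this template is instantiated (the probe
 * path elsewhere in the driver does the equivalent): one Scsi_Host is
 * allocated per adapter, with the ipr_ioa_cfg carried in hostdata.
 *
 *	struct Scsi_Host *host;
 *	struct ipr_ioa_cfg *ioa_cfg;
 *
 *	host = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
 *	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
 */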
3975
3976#ifdef CONFIG_PPC_PSERIES
3977static const u16 ipr_blocked_processors[] = {
3978 PV_NORTHSTAR,
3979 PV_PULSAR,
3980 PV_POWER4,
3981 PV_ICESTAR,
3982 PV_SSTAR,
3983 PV_POWER4p,
3984 PV_630,
3985 PV_630p
3986};
3987
3988/**
3989 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
3990 * @ioa_cfg: ioa cfg struct
3991 *
3992 * Adapters that use Gemstone revision < 3.1 do not work reliably on
3993 * certain pSeries hardware. This function determines if the given
3994 * adapter is in one of these configurations or not.
3995 *
3996 * Return value:
3997 * 1 if adapter is not supported / 0 if adapter is supported
3998 **/
3999static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4000{
4001 u8 rev_id;
4002 int i;
4003
4004 if (ioa_cfg->type == 0x5702) {
4005 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4006 &rev_id) == PCIBIOS_SUCCESSFUL) {
4007 if (rev_id < 4) {
4008 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4009 if (__is_processor(ipr_blocked_processors[i]))
4010 return 1;
4011 }
4012 }
4013 }
4014 }
4015 return 0;
4016}
4017#else
4018#define ipr_invalid_adapter(ioa_cfg) 0
4019#endif
4020
4021/**
4022 * ipr_ioa_bringdown_done - IOA bring down completion.
4023 * @ipr_cmd: ipr command struct
4024 *
4025 * This function processes the completion of an adapter bring down.
4026 * It wakes any reset sleepers.
4027 *
4028 * Return value:
4029 * IPR_RC_JOB_RETURN
4030 **/
4031static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4032{
4033 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4034
4035 ENTER;
4036 ioa_cfg->in_reset_reload = 0;
4037 ioa_cfg->reset_retries = 0;
4038 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4039 wake_up_all(&ioa_cfg->reset_wait_q);
4040
4041 spin_unlock_irq(ioa_cfg->host->host_lock);
4042 scsi_unblock_requests(ioa_cfg->host);
4043 spin_lock_irq(ioa_cfg->host->host_lock);
4044 LEAVE;
4045
4046 return IPR_RC_JOB_RETURN;
4047}
4048
4049/**
4050 * ipr_ioa_reset_done - IOA reset completion.
4051 * @ipr_cmd: ipr command struct
4052 *
4053 * This function processes the completion of an adapter reset.
4054 * It schedules any necessary mid-layer add/removes and
4055 * wakes any reset sleepers.
4056 *
4057 * Return value:
4058 * IPR_RC_JOB_RETURN
4059 **/
4060static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4061{
4062 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4063 struct ipr_resource_entry *res;
4064 struct ipr_hostrcb *hostrcb, *temp;
4065 int i = 0;
4066
4067 ENTER;
4068 ioa_cfg->in_reset_reload = 0;
4069 ioa_cfg->allow_cmds = 1;
4070 ioa_cfg->reset_cmd = NULL;
4071
4072 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4073 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4074 ipr_trace;
4075 break;
4076 }
4077 }
4078 schedule_work(&ioa_cfg->work_q);
4079
4080 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4081 list_del(&hostrcb->queue);
4082 if (i++ < IPR_NUM_LOG_HCAMS)
4083 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4084 else
4085 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4086 }
4087
4088 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4089
4090 ioa_cfg->reset_retries = 0;
4091 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4092 wake_up_all(&ioa_cfg->reset_wait_q);
4093
4094 spin_unlock_irq(ioa_cfg->host->host_lock);
4095 scsi_unblock_requests(ioa_cfg->host);
4096 spin_lock_irq(ioa_cfg->host->host_lock);
4097
4098 if (!ioa_cfg->allow_cmds)
4099 scsi_block_requests(ioa_cfg->host);
4100
4101 LEAVE;
4102 return IPR_RC_JOB_RETURN;
4103}
4104
4105/**
4106 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4107 * @supported_dev: supported device struct
4108 * @vpids: vendor product id struct
4109 *
4110 * Return value:
4111 * none
4112 **/
4113static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4114 struct ipr_std_inq_vpids *vpids)
4115{
4116 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4117 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4118 supported_dev->num_records = 1;
4119 supported_dev->data_length =
4120 cpu_to_be16(sizeof(struct ipr_supported_device));
4121 supported_dev->reserved = 0;
4122}
4123
4124/**
4125 * ipr_set_supported_devs - Send Set Supported Devices for a device
4126 * @ipr_cmd: ipr command struct
4127 *
4128 * This function sends a Set Supported Devices command to the adapter.
4129 *
4130 * Return value:
4131 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4132 **/
4133static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4134{
4135 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4136 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4137 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4138 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4139 struct ipr_resource_entry *res = ipr_cmd->u.res;
4140
4141 ipr_cmd->job_step = ipr_ioa_reset_done;
4142
4143 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4144 if (!ipr_is_af_dasd_device(res))
4145 continue;
4146
4147 ipr_cmd->u.res = res;
4148 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4149
4150 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4151 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4152 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4153
4154 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4155 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4156 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4157
4158 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4159 sizeof(struct ipr_supported_device));
4160 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4161 offsetof(struct ipr_misc_cbs, supp_dev));
4162 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4163 ioarcb->write_data_transfer_length =
4164 cpu_to_be32(sizeof(struct ipr_supported_device));
4165
4166 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4167 IPR_SET_SUP_DEVICE_TIMEOUT);
4168
4169 ipr_cmd->job_step = ipr_set_supported_devs;
4170 return IPR_RC_JOB_RETURN;
4171 }
4172
4173 return IPR_RC_JOB_CONTINUE;
4174}
4175
4176/**
4177 * ipr_get_mode_page - Locate specified mode page
4178 * @mode_pages: mode page buffer
4179 * @page_code: page code to find
4180 * @len: minimum required length for mode page
4181 *
4182 * Return value:
4183 * pointer to mode page / NULL on failure
4184 **/
4185static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4186 u32 page_code, u32 len)
4187{
4188 struct ipr_mode_page_hdr *mode_hdr;
4189 u32 page_length;
4190 u32 length;
4191
4192 if (!mode_pages || (mode_pages->hdr.length == 0))
4193 return NULL;
4194
4195 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4196 mode_hdr = (struct ipr_mode_page_hdr *)
4197 (mode_pages->data + mode_pages->hdr.block_desc_len);
4198
4199 while (length) {
4200 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4201 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4202 return mode_hdr;
4203 break;
4204 } else {
4205 page_length = (sizeof(struct ipr_mode_page_hdr) +
4206 mode_hdr->page_length);
4207 length -= page_length;
4208 mode_hdr = (struct ipr_mode_page_hdr *)
4209 ((unsigned long)mode_hdr + page_length);
4210 }
4211 }
4212 return NULL;
4213}
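/*
 * Illustrative layout of the buffer ipr_get_mode_page() walks (6-byte
 * MODE SENSE parameter format):
 *
 *	+---------------------------+
 *	| 4-byte mode param header  |  hdr.length counts everything after
 *	+---------------------------+  the length byte, hence the +1
 *	| block descriptors         |  hdr.block_desc_len bytes, skipped
 *	+---------------------------+
 *	| page hdr | page data      |  repeated; page_length excludes its
 *	+---------------------------+  own 2-byte header
 *	| ...                       |
 *
 * So the bytes left for pages are (hdr.length + 1) - 4 -
 * hdr.block_desc_len, which is exactly the "length" the loop above
 * counts down as it steps from page header to page header.
 */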
4214
4215/**
4216 * ipr_check_term_power - Check for term power errors
4217 * @ioa_cfg: ioa config struct
4218 * @mode_pages: IOAFP mode pages buffer
4219 *
4220 * Check the IOAFP's mode page 28 for term power errors
4221 *
4222 * Return value:
4223 * nothing
4224 **/
4225static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4226 struct ipr_mode_pages *mode_pages)
4227{
4228 int i;
4229 int entry_length;
4230 struct ipr_dev_bus_entry *bus;
4231 struct ipr_mode_page28 *mode_page;
4232
4233 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4234 sizeof(struct ipr_mode_page28));
4235
4236 entry_length = mode_page->entry_length;
4237
4238 bus = mode_page->bus;
4239
4240 for (i = 0; i < mode_page->num_entries; i++) {
4241 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4242 dev_err(&ioa_cfg->pdev->dev,
4243 "Term power is absent on scsi bus %d\n",
4244 bus->res_addr.bus);
4245 }
4246
4247 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4248 }
4249}
4250
4251/**
4252 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4253 * @ioa_cfg: ioa config struct
4254 *
4255 * Looks through the config table checking for SES devices. If
4256 * the SES device is in the SES table indicating a maximum SCSI
4257 * bus speed, the speed is limited for the bus.
4258 *
4259 * Return value:
4260 * none
4261 **/
4262static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4263{
4264 u32 max_xfer_rate;
4265 int i;
4266
4267 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4268 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4269 ioa_cfg->bus_attr[i].bus_width);
4270
4271 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4272 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4273 }
4274}
4275
4276/**
4277 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4278 * @ioa_cfg: ioa config struct
4279 * @mode_pages: mode page 28 buffer
4280 *
4281 * Updates mode page 28 based on driver configuration
4282 *
4283 * Return value:
4284 * none
4285 **/
4286static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4287 struct ipr_mode_pages *mode_pages)
4288{
4289 int i, entry_length;
4290 struct ipr_dev_bus_entry *bus;
4291 struct ipr_bus_attributes *bus_attr;
4292 struct ipr_mode_page28 *mode_page;
4293
4294 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4295 sizeof(struct ipr_mode_page28));
4296
4297 entry_length = mode_page->entry_length;
4298
4299 /* Loop for each device bus entry */
4300 for (i = 0, bus = mode_page->bus;
4301 i < mode_page->num_entries;
4302 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4303 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4304 dev_err(&ioa_cfg->pdev->dev,
4305 "Invalid resource address reported: 0x%08X\n",
4306 IPR_GET_PHYS_LOC(bus->res_addr));
4307 continue;
4308 }
4309
4310 bus_attr = &ioa_cfg->bus_attr[i];
4311 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4312 bus->bus_width = bus_attr->bus_width;
4313 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4314 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4315 if (bus_attr->qas_enabled)
4316 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4317 else
4318 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4319 }
4320}
4321
4322/**
4323 * ipr_build_mode_select - Build a mode select command
4324 * @ipr_cmd: ipr command struct
4325 * @res_handle: resource handle to send command to
4326 * @parm: Byte 1 of Mode Select command
4327 * @dma_addr: DMA buffer address
4328 * @xfer_len: data transfer length
4329 *
4330 * Return value:
4331 * none
4332 **/
4333static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4334 __be32 res_handle, u8 parm, u32 dma_addr,
4335 u8 xfer_len)
4336{
4337 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4338 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4339
4340 ioarcb->res_handle = res_handle;
4341 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4342 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4343 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4344 ioarcb->cmd_pkt.cdb[1] = parm;
4345 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4346
4347 ioadl->flags_and_data_len =
4348 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4349 ioadl->address = cpu_to_be32(dma_addr);
4350 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4351 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4352}
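/*
 * Example (illustrative): the MODE SELECT(6) CDB built above, with the
 * parm value 0x11 used by ipr_ioafp_mode_select_page28() below and a
 * 64-byte parameter list:
 *
 *	cdb[0] = 0x15	MODE_SELECT(6)
 *	cdb[1] = 0x11	PF=1 (page format), SP=1 (save pages)
 *	cdb[4] = 0x40	parameter list length
 *
 * The single write IOADL entry then hands the adapter the DMA address
 * of a buffer of that same length.
 */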
4353
4354/**
4355 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4356 * @ipr_cmd: ipr command struct
4357 *
4358 * This function sets up the SCSI bus attributes and sends
4359 * a Mode Select for Page 28 to activate them.
4360 *
4361 * Return value:
4362 * IPR_RC_JOB_RETURN
4363 **/
4364static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4365{
4366 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4367 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4368 int length;
4369
4370 ENTER;
4371 if (ioa_cfg->saved_mode_pages) {
4372 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4373 ioa_cfg->saved_mode_page_len);
4374 length = ioa_cfg->saved_mode_page_len;
4375 } else {
4376 ipr_scsi_bus_speed_limit(ioa_cfg);
4377 ipr_check_term_power(ioa_cfg, mode_pages);
4378 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4379 length = mode_pages->hdr.length + 1;
4380 mode_pages->hdr.length = 0;
4381 }
4382
4383 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4384 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4385 length);
4386
4387 ipr_cmd->job_step = ipr_set_supported_devs;
4388 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4389 struct ipr_resource_entry, queue);
4390
4391 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4392
4393 LEAVE;
4394 return IPR_RC_JOB_RETURN;
4395}
4396
4397/**
4398 * ipr_build_mode_sense - Builds a mode sense command
4399 * @ipr_cmd: ipr command struct
4400 * @res: resource entry struct
4401 * @parm: Byte 2 of mode sense command
4402 * @dma_addr: DMA address of mode sense buffer
4403 * @xfer_len: Size of DMA buffer
4404 *
4405 * Return value:
4406 * none
4407 **/
4408static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4409 __be32 res_handle,
4410 u8 parm, u32 dma_addr, u8 xfer_len)
4411{
4412 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4413 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4414
4415 ioarcb->res_handle = res_handle;
4416 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4417 ioarcb->cmd_pkt.cdb[2] = parm;
4418 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4419 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4420
4421 ioadl->flags_and_data_len =
4422 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4423 ioadl->address = cpu_to_be32(dma_addr);
4424 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4425 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4426}
4427
4428/**
4429 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4430 * @ipr_cmd: ipr command struct
4431 *
4432 * This function sends a Page 28 mode sense to the IOA to
4433 * retrieve SCSI bus attributes.
4434 *
4435 * Return value:
4436 * IPR_RC_JOB_RETURN
4437 **/
4438static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4439{
4440 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4441
4442 ENTER;
4443 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4444 0x28, ioa_cfg->vpd_cbs_dma +
4445 offsetof(struct ipr_misc_cbs, mode_pages),
4446 sizeof(struct ipr_mode_pages));
4447
4448 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4449
4450 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4451
4452 LEAVE;
4453 return IPR_RC_JOB_RETURN;
4454}
4455
4456/**
4457 * ipr_init_res_table - Initialize the resource table
4458 * @ipr_cmd: ipr command struct
4459 *
4460 * This function looks through the existing resource table, comparing
4461 * it with the config table. This function will take care of old/new
4462 * devices and schedule adding/removing them from the mid-layer
4463 * as appropriate.
4464 *
4465 * Return value:
4466 * IPR_RC_JOB_CONTINUE
4467 **/
4468static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4469{
4470 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4471 struct ipr_resource_entry *res, *temp;
4472 struct ipr_config_table_entry *cfgte;
4473 int found, i;
4474 LIST_HEAD(old_res);
4475
4476 ENTER;
4477 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4478 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4479
4480 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4481 list_move_tail(&res->queue, &old_res);
4482
4483 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4484 cfgte = &ioa_cfg->cfg_table->dev[i];
4485 found = 0;
4486
4487 list_for_each_entry_safe(res, temp, &old_res, queue) {
4488 if (!memcmp(&res->cfgte.res_addr,
4489 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4490 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4491 found = 1;
4492 break;
4493 }
4494 }
4495
4496 if (!found) {
4497 if (list_empty(&ioa_cfg->free_res_q)) {
4498 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4499 break;
4500 }
4501
4502 found = 1;
4503 res = list_entry(ioa_cfg->free_res_q.next,
4504 struct ipr_resource_entry, queue);
4505 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4506 ipr_init_res_entry(res);
4507 res->add_to_ml = 1;
4508 }
4509
4510 if (found)
4511 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4512 }
4513
4514 list_for_each_entry_safe(res, temp, &old_res, queue) {
4515 if (res->sdev) {
4516 res->del_from_ml = 1;
4517 res->sdev->hostdata = NULL;
4518 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4519 } else {
4520 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4521 }
4522 }
4523
4524 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4525
4526 LEAVE;
4527 return IPR_RC_JOB_CONTINUE;
4528}
4529
4530/**
4531 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4532 * @ipr_cmd: ipr command struct
4533 *
4534 * This function sends a Query IOA Configuration command
4535 * to the adapter to retrieve the IOA configuration table.
4536 *
4537 * Return value:
4538 * IPR_RC_JOB_RETURN
4539 **/
4540static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4541{
4542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4543 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4544 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4545 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4546
4547 ENTER;
4548 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4549 ucode_vpd->major_release, ucode_vpd->card_type,
4550 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4551 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4552 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4553
4554 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4555 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4556 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4557
4558 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4559 ioarcb->read_data_transfer_length =
4560 cpu_to_be32(sizeof(struct ipr_config_table));
4561
4562 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4563 ioadl->flags_and_data_len =
4564 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4565
4566 ipr_cmd->job_step = ipr_init_res_table;
4567
4568 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4569
4570 LEAVE;
4571 return IPR_RC_JOB_RETURN;
4572}
4573
4574/**
4575 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4576 * @ipr_cmd: ipr command struct
 * @flags: inquiry CDB byte 1 flags (e.g. EVPD)
 * @page: page code to request
 * @dma_addr: DMA address of the inquiry response buffer
 * @xfer_len: size of the inquiry response buffer
4577 *
4578 * This utility function sends an inquiry to the adapter.
4579 *
4580 * Return value:
4581 * none
4582 **/
4583static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4584 u32 dma_addr, u8 xfer_len)
4585{
4586 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4587 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4588
4589 ENTER;
4590 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4591 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4592
4593 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4594 ioarcb->cmd_pkt.cdb[1] = flags;
4595 ioarcb->cmd_pkt.cdb[2] = page;
4596 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4597
4598 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4599 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4600
4601 ioadl->address = cpu_to_be32(dma_addr);
4602 ioadl->flags_and_data_len =
4603 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4604
4605 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4606 LEAVE;
4607}
4608
4609/**
4610 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4611 * @ipr_cmd: ipr command struct
4612 *
4613 * This function sends a Page 3 inquiry to the adapter
4614 * to retrieve software VPD information.
4615 *
4616 * Return value:
4617 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4618 **/
4619static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4620{
4621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4622 char type[5];
4623
4624 ENTER;
4625
4626 /* Grab the type out of the VPD and store it away */
4627 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4628 type[4] = '\0';
4629 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4630
4631 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4632
4633 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4634 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4635 sizeof(struct ipr_inquiry_page3));
4636
4637 LEAVE;
4638 return IPR_RC_JOB_RETURN;
4639}
4640
4641/**
4642 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4643 * @ipr_cmd: ipr command struct
4644 *
4645 * This function sends a standard inquiry to the adapter.
4646 *
4647 * Return value:
4648 * IPR_RC_JOB_RETURN
4649 **/
4650static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4651{
4652 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4653
4654 ENTER;
4655 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
4656
4657 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4658 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4659 sizeof(struct ipr_ioa_vpd));
4660
4661 LEAVE;
4662 return IPR_RC_JOB_RETURN;
4663}
4664
4665/**
4666 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4667 * @ipr_cmd: ipr command struct
4668 *
4669 * This function sends an Identify Host Request Response Queue
4670 * command to establish the HRRQ with the adapter.
4671 *
4672 * Return value:
4673 * IPR_RC_JOB_RETURN
4674 **/
4675static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4676{
4677 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4678 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4679
4680 ENTER;
4681 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4682
4683 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4684 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4685
4686 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4687 ioarcb->cmd_pkt.cdb[2] =
4688 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4689 ioarcb->cmd_pkt.cdb[3] =
4690 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4691 ioarcb->cmd_pkt.cdb[4] =
4692 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4693 ioarcb->cmd_pkt.cdb[5] =
4694 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
4695 ioarcb->cmd_pkt.cdb[7] =
4696 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4697 ioarcb->cmd_pkt.cdb[8] =
4698 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4699
4700 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4701
4702 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4703
4704 LEAVE;
4705 return IPR_RC_JOB_RETURN;
4706}
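/*
 * Illustrative sketch: the Identify Host RRQ CDB built above packs the
 * 32-bit host RRQ DMA address and the queue size big-endian, one byte
 * at a time. The same packing written as a loop (treating
 * ioarcb->cmd_pkt.cdb as cdb; the loop form is illustration only, and
 * assumes the DMA address fits in 32 bits as the driver requires):
 *
 *	u32 addr = (u32) ioa_cfg->host_rrq_dma;
 *	u16 len = sizeof(u32) * IPR_NUM_CMD_BLKS;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)         // cdb[2..5] = address, MSB first
 *		cdb[2 + i] = (addr >> (24 - 8 * i)) & 0xff;
 *	cdb[7] = (len >> 8) & 0xff;     // cdb[7..8] = queue length
 *	cdb[8] = len & 0xff;
 */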
4707
4708/**
4709 * ipr_reset_timer_done - Adapter reset timer function
4710 * @ipr_cmd: ipr command struct
4711 *
4712 * Description: This function is used in adapter reset processing
4713 * for timing events. If the reset_cmd pointer in the IOA
4714 * config struct is not this adapter's, we are doing nested
4715 * resets and fail_all_ops will take care of freeing the
4716 * command block.
4717 *
4718 * Return value:
4719 * none
4720 **/
4721static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4722{
4723 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4724 unsigned long lock_flags = 0;
4725
4726 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4727
4728 if (ioa_cfg->reset_cmd == ipr_cmd) {
4729 list_del(&ipr_cmd->queue);
4730 ipr_cmd->done(ipr_cmd);
4731 }
4732
4733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4734}
4735
4736/**
4737 * ipr_reset_start_timer - Start a timer for adapter reset job
4738 * @ipr_cmd: ipr command struct
4739 * @timeout: timeout value
4740 *
4741 * Description: This function is used in adapter reset processing
4742 * for timing events. If the reset_cmd pointer in the IOA
4743 * config struct is not this adapter's, we are doing nested
4744 * resets and fail_all_ops will take care of freeing the
4745 * command block.
4746 *
4747 * Return value:
4748 * none
4749 **/
4750static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4751 unsigned long timeout)
4752{
4753 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4754 ipr_cmd->done = ipr_reset_ioa_job;
4755
4756 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4757 ipr_cmd->timer.expires = jiffies + timeout;
4758 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4759 add_timer(&ipr_cmd->timer);
4760}
4761
4762/**
4763 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4764 * @ioa_cfg: ioa cfg struct
4765 *
4766 * Return value:
4767 * nothing
4768 **/
4769static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4770{
4771 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4772
4773 /* Initialize Host RRQ pointers */
4774 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4775 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4776 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4777 ioa_cfg->toggle_bit = 1;
4778
4779 /* Zero out config table */
4780 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
4781}
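/*
 * Illustrative note on the toggle bit: the host RRQ is a ring of u32
 * response handles. The adapter flips the low-order toggle bit each
 * time it wraps the ring, so the host can tell a fresh entry from a
 * stale one without a separate producer index. A minimal consumer
 * sketch in the style of the driver's interrupt handler, assuming the
 * IPR_HRRQ_TOGGLE_BIT definition from ipr.h:
 *
 *	while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT)
 *	       == ioa_cfg->toggle_bit) {
 *		// ... process the handle in *ioa_cfg->hrrq_curr ...
 *		if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
 *			ioa_cfg->hrrq_curr++;
 *		} else {
 *			ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
 *			ioa_cfg->toggle_bit ^= 1u;	// wrapped: expect flip
 *		}
 *	}
 */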
4782
4783/**
4784 * ipr_reset_enable_ioa - Enable the IOA following a reset.
4785 * @ipr_cmd: ipr command struct
4786 *
4787 * This function reinitializes some control blocks and
4788 * enables destructive diagnostics on the adapter.
4789 *
4790 * Return value:
4791 * IPR_RC_JOB_RETURN
4792 **/
4793static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
4794{
4795 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4796 volatile u32 int_reg;
4797
4798 ENTER;
4799 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
4800 ipr_init_ioa_mem(ioa_cfg);
4801
4802 ioa_cfg->allow_interrupts = 1;
4803 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4804
4805 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4806 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
4807 ioa_cfg->regs.clr_interrupt_mask_reg);
4808 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4809 return IPR_RC_JOB_CONTINUE;
4810 }
4811
4812 /* Enable destructive diagnostics on IOA */
4813 writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
4814
4815 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
4816 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4817
4818 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
4819
4820 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4821 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
4822 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
4823 ipr_cmd->done = ipr_reset_ioa_job;
4824 add_timer(&ipr_cmd->timer);
4825 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4826
4827 LEAVE;
4828 return IPR_RC_JOB_RETURN;
4829}
4830
4831/**
4832 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4833 * @ipr_cmd: ipr command struct
4834 *
4835 * This function is invoked when an adapter dump has run out
4836 * of processing time.
4837 *
4838 * Return value:
4839 * IPR_RC_JOB_CONTINUE
4840 **/
4841static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
4842{
4843 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4844
4845 if (ioa_cfg->sdt_state == GET_DUMP)
4846 ioa_cfg->sdt_state = ABORT_DUMP;
4847
4848 ipr_cmd->job_step = ipr_reset_alert;
4849
4850 return IPR_RC_JOB_CONTINUE;
4851}
4852
4853/**
4854 * ipr_unit_check_no_data - Log a unit check/no data error log
4855 * @ioa_cfg: ioa config struct
4856 *
4857 * Logs an error indicating the adapter unit checked, but for some
4858 * reason, we were unable to fetch the unit check buffer.
4859 *
4860 * Return value:
4861 * nothing
4862 **/
4863static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
4864{
4865 ioa_cfg->errors_logged++;
4866 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
4867}
4868
4869/**
4870 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
4871 * @ioa_cfg: ioa config struct
4872 *
4873 * Fetches the unit check buffer from the adapter by clocking the data
4874 * through the mailbox register.
4875 *
4876 * Return value:
4877 * nothing
4878 **/
4879static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
4880{
4881 unsigned long mailbox;
4882 struct ipr_hostrcb *hostrcb;
4883 struct ipr_uc_sdt sdt;
4884 int rc, length;
4885
4886 mailbox = readl(ioa_cfg->ioa_mailbox);
4887
4888 if (!ipr_sdt_is_fmt2(mailbox)) {
4889 ipr_unit_check_no_data(ioa_cfg);
4890 return;
4891 }
4892
4893 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
4894 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
4895 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
4896
4897 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
4898 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
4899 ipr_unit_check_no_data(ioa_cfg);
4900 return;
4901 }
4902
4903 /* Find length of the first sdt entry (UC buffer) */
4904 length = (be32_to_cpu(sdt.entry[0].end_offset) -
4905 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
4906
4907 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
4908 struct ipr_hostrcb, queue);
4909 list_del(&hostrcb->queue);
4910 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
4911
4912 rc = ipr_get_ldump_data_section(ioa_cfg,
4913 be32_to_cpu(sdt.entry[0].bar_str_offset),
4914 (__be32 *)&hostrcb->hcam,
4915 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
4916
4917 if (!rc)
4918 ipr_handle_log_data(ioa_cfg, hostrcb);
4919 else
4920 ipr_unit_check_no_data(ioa_cfg);
4921
4922 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4923}
4924
4925/**
4926 * ipr_reset_restore_cfg_space - Restore PCI config space.
4927 * @ipr_cmd: ipr command struct
4928 *
4929 * Description: This function restores the saved PCI config space of
4930 * the adapter, fails all outstanding ops back to the callers, and
4931 * fetches the dump/unit check if applicable to this reset.
4932 *
4933 * Return value:
4934 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4935 **/
4936static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
4937{
4938 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4939 int rc;
4940
4941 ENTER;
4942	pci_unblock_user_cfg_access(ioa_cfg->pdev);
4943	rc = pci_restore_state(ioa_cfg->pdev);
4944
4945 if (rc != PCIBIOS_SUCCESSFUL) {
4946 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4947 return IPR_RC_JOB_CONTINUE;
4948 }
4949
4950 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
4951 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4952 return IPR_RC_JOB_CONTINUE;
4953 }
4954
4955 ipr_fail_all_ops(ioa_cfg);
4956
4957 if (ioa_cfg->ioa_unit_checked) {
4958 ioa_cfg->ioa_unit_checked = 0;
4959 ipr_get_unit_check_buffer(ioa_cfg);
4960 ipr_cmd->job_step = ipr_reset_alert;
4961 ipr_reset_start_timer(ipr_cmd, 0);
4962 return IPR_RC_JOB_RETURN;
4963 }
4964
4965 if (ioa_cfg->in_ioa_bringdown) {
4966 ipr_cmd->job_step = ipr_ioa_bringdown_done;
4967 } else {
4968 ipr_cmd->job_step = ipr_reset_enable_ioa;
4969
4970 if (GET_DUMP == ioa_cfg->sdt_state) {
4971 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
4972 ipr_cmd->job_step = ipr_reset_wait_for_dump;
4973 schedule_work(&ioa_cfg->work_q);
4974 return IPR_RC_JOB_RETURN;
4975 }
4976 }
4977
4978	LEAVE;
4979 return IPR_RC_JOB_CONTINUE;
4980}
4981
4982/**
4983 * ipr_reset_start_bist - Run BIST on the adapter.
4984 * @ipr_cmd: ipr command struct
4985 *
4986 * Description: This function runs BIST on the adapter, then delays 2 seconds.
4987 *
4988 * Return value:
4989 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4990 **/
4991static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
4992{
4993 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4994 int rc;
4995
4996 ENTER;
4997	pci_block_user_cfg_access(ioa_cfg->pdev);
4998	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
4999
5000 if (rc != PCIBIOS_SUCCESSFUL) {
5001 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5002 rc = IPR_RC_JOB_CONTINUE;
5003 } else {
5004 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5005 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5006 rc = IPR_RC_JOB_RETURN;
5007 }
5008
5009 LEAVE;
5010 return rc;
5011}
5012
5013/**
5014 * ipr_reset_allowed - Query whether or not IOA can be reset
5015 * @ioa_cfg: ioa config struct
5016 *
5017 * Return value:
5018 * 0 if reset not allowed / non-zero if reset is allowed
5019 **/
5020static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5021{
5022 volatile u32 temp_reg;
5023
5024 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5025 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5026}
5027
5028/**
5029 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5030 * @ipr_cmd: ipr command struct
5031 *
5032 * Description: This function waits for adapter permission to run BIST,
5033 * then runs BIST. If the adapter does not give permission after a
5034 * reasonable time, we will reset the adapter anyway. The impact of
5035 * resetting the adapter without warning the adapter is the risk of
5036 * losing the persistent error log on the adapter. If the adapter is
5037 * reset while it is writing to the flash on the adapter, the flash
5038 * segment will have bad ECC and be zeroed.
5039 *
5040 * Return value:
5041 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5042 **/
5043static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5044{
5045 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5046 int rc = IPR_RC_JOB_RETURN;
5047
5048 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5049 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5050 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5051 } else {
5052 ipr_cmd->job_step = ipr_reset_start_bist;
5053 rc = IPR_RC_JOB_CONTINUE;
5054 }
5055
5056 return rc;
5057}
5058
5059/**
5060 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5061 * @ipr_cmd: ipr command struct
5062 *
5063 * Description: This function alerts the adapter that it will be reset.
5064 * If memory space is not currently enabled, proceed directly
5065 * to running BIST on the adapter. The timer must always be started
5066 * so we guarantee we do not run BIST from ipr_isr.
5067 *
5068 * Return value:
5069 * IPR_RC_JOB_RETURN
5070 **/
5071static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5072{
5073 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5074 u16 cmd_reg;
5075 int rc;
5076
5077 ENTER;
5078 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5079
5080 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5081 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5082 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5083 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5084 } else {
5085 ipr_cmd->job_step = ipr_reset_start_bist;
5086 }
5087
5088 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5089 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5090
5091 LEAVE;
5092 return IPR_RC_JOB_RETURN;
5093}
5094
5095/**
5096 * ipr_reset_ucode_download_done - Microcode download completion
5097 * @ipr_cmd: ipr command struct
5098 *
5099 * Description: This function unmaps the microcode download buffer.
5100 *
5101 * Return value:
5102 * IPR_RC_JOB_CONTINUE
5103 **/
5104static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5105{
5106 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5107 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5108
5109 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5110 sglist->num_sg, DMA_TO_DEVICE);
5111
5112 ipr_cmd->job_step = ipr_reset_alert;
5113 return IPR_RC_JOB_CONTINUE;
5114}
5115
5116/**
5117 * ipr_reset_ucode_download - Download microcode to the adapter
5118 * @ipr_cmd: ipr command struct
5119 *
5120 * Description: This function checks to see if there is microcode
5121 * to download to the adapter. If there is, a download is performed.
5122 *
5123 * Return value:
5124 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5125 **/
5126static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5127{
5128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5129 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5130
5131 ENTER;
5132 ipr_cmd->job_step = ipr_reset_alert;
5133
5134 if (!sglist)
5135 return IPR_RC_JOB_CONTINUE;
5136
5137 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5138 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5139 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5140 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5141 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5142 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5143 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5144
5145 if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
5146 dev_err(&ioa_cfg->pdev->dev,
5147 "Failed to map microcode download buffer\n");
5148 return IPR_RC_JOB_CONTINUE;
5149 }
5150
5151 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5152
5153 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5154 IPR_WRITE_BUFFER_TIMEOUT);
5155
5156 LEAVE;
5157 return IPR_RC_JOB_RETURN;
5158}
5159
5160/**
5161 * ipr_reset_shutdown_ioa - Shutdown the adapter
5162 * @ipr_cmd: ipr command struct
5163 *
5164 * Description: This function issues an adapter shutdown of the
5165 * specified type to the specified adapter as part of the
5166 * adapter reset job.
5167 *
5168 * Return value:
5169 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5170 **/
5171static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5172{
5173 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5174 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5175 unsigned long timeout;
5176 int rc = IPR_RC_JOB_CONTINUE;
5177
5178 ENTER;
5179 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5180 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5181 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5182 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5183 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5184
5185 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5186 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5187 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5188 timeout = IPR_INTERNAL_TIMEOUT;
5189 else
5190 timeout = IPR_SHUTDOWN_TIMEOUT;
5191
5192 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5193
5194 rc = IPR_RC_JOB_RETURN;
5195 ipr_cmd->job_step = ipr_reset_ucode_download;
5196 } else
5197 ipr_cmd->job_step = ipr_reset_alert;
5198
5199 LEAVE;
5200 return rc;
5201}
5202
5203/**
5204 * ipr_reset_ioa_job - Adapter reset job
5205 * @ipr_cmd: ipr command struct
5206 *
5207 * Description: This function is the job router for the adapter reset job.
5208 *
5209 * Return value:
5210 * none
5211 **/
5212static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5213{
5214 u32 rc, ioasc;
5215 unsigned long scratch = ipr_cmd->u.scratch;
5216 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5217
5218 do {
5219 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5220
5221 if (ioa_cfg->reset_cmd != ipr_cmd) {
5222 /*
5223 * We are doing nested adapter resets and this is
5224 * not the current reset job.
5225 */
5226 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5227 return;
5228 }
5229
5230 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5231 dev_err(&ioa_cfg->pdev->dev,
5232 "0x%02X failed with IOASC: 0x%08X\n",
5233 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5234
5235 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5236 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5237 return;
5238 }
5239
5240 ipr_reinit_ipr_cmnd(ipr_cmd);
5241 ipr_cmd->u.scratch = scratch;
5242 rc = ipr_cmd->job_step(ipr_cmd);
5243 } while(rc == IPR_RC_JOB_CONTINUE);
5244}
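/*
 * Illustrative sketch of the reset job state machine driven above:
 * each job_step either finishes synchronously (IPR_RC_JOB_CONTINUE, so
 * the loop immediately runs the next step it installed) or arms a
 * command/timer and unwinds (IPR_RC_JOB_RETURN, to be re-entered later
 * via ipr_cmd->done). In pseudo-C:
 *
 *	for (;;) {
 *		step = ipr_cmd->job_step;	// installed by previous step
 *		rc = step(ipr_cmd);
 *		if (rc == IPR_RC_JOB_RETURN)
 *			break;			// async: completion re-enters
 *	}
 *
 * A full bringup walks shutdown -> ucode download -> alert -> BIST ->
 * restore config space -> enable IOA -> identify HRRQ -> inquiries ->
 * query IOA config -> init resource table -> mode sense/select page
 * 28 -> set supported devices -> reset done, per the job_step
 * assignments in this file.
 */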
5245
5246/**
5247 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5248 * @ioa_cfg: ioa config struct
5249 * @job_step: first job step of reset job
5250 * @shutdown_type: shutdown type
5251 *
5252 * Description: This function will initiate the reset of the given adapter
5253 * starting at the selected job step.
5254 * If the caller needs to wait on the completion of the reset,
5255 * the caller must sleep on the reset_wait_q.
5256 *
5257 * Return value:
5258 * none
5259 **/
5260static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5261 int (*job_step) (struct ipr_cmnd *),
5262 enum ipr_shutdown_type shutdown_type)
5263{
5264 struct ipr_cmnd *ipr_cmd;
5265
5266 ioa_cfg->in_reset_reload = 1;
5267 ioa_cfg->allow_cmds = 0;
5268 scsi_block_requests(ioa_cfg->host);
5269
5270 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5271 ioa_cfg->reset_cmd = ipr_cmd;
5272 ipr_cmd->job_step = job_step;
5273 ipr_cmd->u.shutdown_type = shutdown_type;
5274
5275 ipr_reset_ioa_job(ipr_cmd);
5276}
5277
5278/**
5279 * ipr_initiate_ioa_reset - Initiate an adapter reset
5280 * @ioa_cfg: ioa config struct
5281 * @shutdown_type: shutdown type
5282 *
5283 * Description: This function will initiate the reset of the given adapter.
5284 * If the caller needs to wait on the completion of the reset,
5285 * the caller must sleep on the reset_wait_q.
5286 *
5287 * Return value:
5288 * none
5289 **/
5290static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5291 enum ipr_shutdown_type shutdown_type)
5292{
5293 if (ioa_cfg->ioa_is_dead)
5294 return;
5295
5296 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5297 ioa_cfg->sdt_state = ABORT_DUMP;
5298
5299 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5300 dev_err(&ioa_cfg->pdev->dev,
5301 "IOA taken offline - error recovery failed\n");
5302
5303 ioa_cfg->reset_retries = 0;
5304 ioa_cfg->ioa_is_dead = 1;
5305
5306 if (ioa_cfg->in_ioa_bringdown) {
5307 ioa_cfg->reset_cmd = NULL;
5308 ioa_cfg->in_reset_reload = 0;
5309 ipr_fail_all_ops(ioa_cfg);
5310 wake_up_all(&ioa_cfg->reset_wait_q);
5311
5312 spin_unlock_irq(ioa_cfg->host->host_lock);
5313 scsi_unblock_requests(ioa_cfg->host);
5314 spin_lock_irq(ioa_cfg->host->host_lock);
5315 return;
5316 } else {
5317 ioa_cfg->in_ioa_bringdown = 1;
5318 shutdown_type = IPR_SHUTDOWN_NONE;
5319 }
5320 }
5321
5322 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5323 shutdown_type);
5324}
5325
5326/**
5327 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5328 * @ioa_cfg: ioa cfg struct
5329 *
5330 * Description: This is the second phase of adapter initialization.
5331 * This function takes care of initializing the adapter to the point
5332 * where it can accept new commands.
5333 *
5334 * Return value:
5335 * 	0 on success / -EIO on failure
5336 **/
5337static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5338{
5339 int rc = 0;
5340 unsigned long host_lock_flags = 0;
5341
5342 ENTER;
5343 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5344 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5345 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5346
5347 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5348 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5349 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5350
5351 if (ioa_cfg->ioa_is_dead) {
5352 rc = -EIO;
5353 } else if (ipr_invalid_adapter(ioa_cfg)) {
5354 if (!ipr_testmode)
5355 rc = -EIO;
5356
5357 dev_err(&ioa_cfg->pdev->dev,
5358 "Adapter not supported in this hardware configuration.\n");
5359 }
5360
5361 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5362
5363 LEAVE;
5364 return rc;
5365}
5366
5367/**
5368 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5369 * @ioa_cfg: ioa config struct
5370 *
5371 * Return value:
5372 * none
5373 **/
5374static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5375{
5376 int i;
5377
5378 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5379 if (ioa_cfg->ipr_cmnd_list[i])
5380 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5381 ioa_cfg->ipr_cmnd_list[i],
5382 ioa_cfg->ipr_cmnd_list_dma[i]);
5383
5384 ioa_cfg->ipr_cmnd_list[i] = NULL;
5385 }
5386
5387 if (ioa_cfg->ipr_cmd_pool)
5388 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5389
5390 ioa_cfg->ipr_cmd_pool = NULL;
5391}
5392
5393/**
5394 * ipr_free_mem - Frees memory allocated for an adapter
5395 * @ioa_cfg: ioa cfg struct
5396 *
5397 * Return value:
5398 * nothing
5399 **/
5400static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5401{
5402 int i;
5403
5404 kfree(ioa_cfg->res_entries);
5405 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5406 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5407 ipr_free_cmd_blks(ioa_cfg);
5408 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5409 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5410 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5411 ioa_cfg->cfg_table,
5412 ioa_cfg->cfg_table_dma);
5413
5414 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5415 pci_free_consistent(ioa_cfg->pdev,
5416 sizeof(struct ipr_hostrcb),
5417 ioa_cfg->hostrcb[i],
5418 ioa_cfg->hostrcb_dma[i]);
5419 }
5420
5421 ipr_free_dump(ioa_cfg);
5422 kfree(ioa_cfg->saved_mode_pages);
5423 kfree(ioa_cfg->trace);
5424}
5425
5426/**
5427 * ipr_free_all_resources - Free all allocated resources for an adapter.
5428 * @ioa_cfg: ioa config struct
5429 *
5430 * This function frees all allocated resources for the
5431 * specified adapter.
5432 *
5433 * Return value:
5434 * none
5435 **/
5436static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5437{
5438 struct pci_dev *pdev = ioa_cfg->pdev;
5439
5440 ENTER;
5441 free_irq(pdev->irq, ioa_cfg);
5442 iounmap(ioa_cfg->hdw_dma_regs);
5443 pci_release_regions(pdev);
5444 ipr_free_mem(ioa_cfg);
5445 scsi_host_put(ioa_cfg->host);
5446 pci_disable_device(pdev);
5447 LEAVE;
5448}
5449
5450/**
5451 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5452 * @ioa_cfg: ioa config struct
5453 *
5454 * Return value:
5455 * 0 on success / -ENOMEM on allocation failure
5456 **/
5457static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5458{
5459 struct ipr_cmnd *ipr_cmd;
5460 struct ipr_ioarcb *ioarcb;
5461 dma_addr_t dma_addr;
5462 int i;
5463
	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

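		/*
		 * Preformat the IOARCB. The response handle is the command
		 * index shifted up two bits, leaving the low-order bits of
		 * each host RRQ entry free for the response/toggle flags.
		 */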
		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	memset(ioa_cfg->res_entries, 0,
	       sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS);

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

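	/*
	 * One DMA-able host RCB for each HCAM the adapter can have
	 * outstanding; each records its own bus address so the adapter
	 * can fill it in directly.
	 */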
	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	memset(ioa_cfg->trace, 0,
	       sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES);

	rc = 0;
out:
	LEAVE;
	return rc;

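	/*
	 * Error unwind: free in the reverse order of allocation. On entry
	 * to out_free_hostrcb_dma, i holds the number of host RCBs that
	 * were successfully allocated.
	 */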
out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
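		/*
		 * ipr_max_speed is a module option indexing
		 * ipr_max_bus_speeds; fall back to the U160 rate if it
		 * is out of range.
		 */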
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg: ioa config struct
 * @host: scsi host struct
 * @pdev: PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

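	/*
	 * Translate the chip-specific register offsets into mapped
	 * virtual addresses so the rest of the driver can use them
	 * without knowing which chip it is driving.
	 */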
	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id: PCI device id struct
 *
 * Return value:
 * 	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

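	/*
	 * Entries in ipr_pci_table may point straight at their chip
	 * config via driver_data; otherwise fall back to matching
	 * vendor/device against the ipr_chip table.
	 */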
	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;
	return NULL;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -ENODEV;
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

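	/*
	 * Mask and clear any pending interrupts (except the transition-
	 * to-operational interrupt) before hooking the ISR, so a stale
	 * interrupt cannot fire into a half-initialized driver.
	 */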
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg: ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
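	/*
	 * The worker thread may still be adding or removing devices;
	 * make sure any queued work has finished before tearing down.
	 */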
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev: pci device struct
 * @dev_id: pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

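	/*
	 * Kick off device discovery: a normal SCSI scan, a manual scan of
	 * the VSET bus (sparse LUNs, which the midlayer cannot find on
	 * its own), and the IOA itself exposed as a device.
	 */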
	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev: pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

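/*
 * PCI IDs are matched on subsystem vendor/device; driver_data points at
 * the chip config entry (register layout) for that adapter family.
 */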
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_module_init(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);