/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
229 "No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

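	/* Record this op in the next slot of the driver's trace buffer */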
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

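	/* Take the first command block off the free queue (assumed non-empty here) */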
	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
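	/* Read back the sense register to flush the posted MMIO writes */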
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

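	/* Make sure the IOARCB is in memory before handing it to the adapter */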
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

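	/* Drop the host lock while we sleep; the done function runs from
	 * interrupt context and completes us */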
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

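		/* Ensure the IOARCB is in memory before handing it to the adapter */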
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 1;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

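	/* Device not in the resource table yet: take a free entry for it,
	 * or re-issue the HCAM if no entries are left */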
	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

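		/* Entries 0-9 are in array_member[]; 10-17 continue in array_member2[] */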
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	int i;
	int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);

	if (ioa_data_len == 0)
		return;

	for (i = 0; i < ioa_data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
	}
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If a host reset arrived while we were already resetting the
	   adapter for some other reason, and that reset failed, the
	   adapter is now dead. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

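		/* Convert the enclosure's MB/s limit into the 100KHz units
		 * used for max_xfer_rate, given the bus width in bytes */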
1359 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1360 }
1361
1362 return max_xfer_rate;
1363}
1364
1365/**
1366 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1367 * @ioa_cfg: ioa config struct
1368 * @max_delay: max delay in micro-seconds to wait
1369 *
1370 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1371 *
1372 * Return value:
1373 * 0 on success / other on failure
1374 **/
1375static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1376{
1377 volatile u32 pcii_reg;
1378 int delay = 1;
1379
1380 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1381 while (delay < max_delay) {
1382 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1383
1384 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1385 return 0;
1386
1387 /* udelay cannot be used if delay is more than a few milliseconds */
1388 if ((delay / 1000) > MAX_UDELAY_MS)
1389 mdelay(delay / 1000);
1390 else
1391 udelay(delay);
1392
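		/* Back off exponentially, doubling the wait each pass */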
		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
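		/* Yield the CPU between sections; the dump can be large */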
1545 schedule();
1546 }
1547
1548 return bytes_copied;
1549}
1550
1551/**
1552 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1553 * @hdr: dump entry header struct
1554 *
1555 * Return value:
1556 * nothing
1557 **/
1558static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1559{
1560 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1561 hdr->num_elems = 1;
1562 hdr->offset = sizeof(*hdr);
1563 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1564}
1565
1566/**
1567 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1568 * @ioa_cfg: ioa config struct
1569 * @driver_dump: driver dump struct
1570 *
1571 * Return value:
1572 * nothing
1573 **/
1574static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1575 struct ipr_driver_dump *driver_dump)
1576{
1577 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1578
1579 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1580 driver_dump->ioa_type_entry.hdr.len =
1581 sizeof(struct ipr_dump_ioa_type_entry) -
1582 sizeof(struct ipr_dump_entry_header);
1583 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1584 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1585 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1586 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1587 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1588 ucode_vpd->minor_release[1];
1589 driver_dump->hdr.num_entries++;
1590}
1591
1592/**
1593 * ipr_dump_version_data - Fill in the driver version in the dump.
1594 * @ioa_cfg: ioa config struct
1595 * @driver_dump: driver dump struct
1596 *
1597 * Return value:
1598 * nothing
1599 **/
1600static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1601 struct ipr_driver_dump *driver_dump)
1602{
1603 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1604 driver_dump->version_entry.hdr.len =
1605 sizeof(struct ipr_dump_version_entry) -
1606 sizeof(struct ipr_dump_entry_header);
1607 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1608 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1609 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1610 driver_dump->hdr.num_entries++;
1611}
1612
1613/**
1614 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1615 * @ioa_cfg: ioa config struct
1616 * @driver_dump: driver dump struct
1617 *
1618 * Return value:
1619 * nothing
1620 **/
1621static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1622 struct ipr_driver_dump *driver_dump)
1623{
1624 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1625 driver_dump->trace_entry.hdr.len =
1626 sizeof(struct ipr_dump_trace_entry) -
1627 sizeof(struct ipr_dump_entry_header);
1628 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1629 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1630 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1631 driver_dump->hdr.num_entries++;
1632}
1633
1634/**
1635 * ipr_dump_location_data - Fill in the IOA location in the dump.
1636 * @ioa_cfg: ioa config struct
1637 * @driver_dump: driver dump struct
1638 *
1639 * Return value:
1640 * nothing
1641 **/
1642static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1643 struct ipr_driver_dump *driver_dump)
1644{
1645 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1646 driver_dump->location_entry.hdr.len =
1647 sizeof(struct ipr_dump_location_entry) -
1648 sizeof(struct ipr_dump_entry_header);
1649 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1650 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1651 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1652 driver_dump->hdr.num_entries++;
1653}
1654
1655/**
1656 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1657 * @ioa_cfg: ioa config struct
1658 * @dump: dump struct
1659 *
1660 * Return value:
1661 * nothing
1662 **/
1663static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1664{
1665 unsigned long start_addr, sdt_word;
1666 unsigned long lock_flags = 0;
1667 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1668 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1669 u32 num_entries, start_off, end_off;
1670 u32 bytes_to_copy, bytes_copied, rc;
1671 struct ipr_sdt *sdt;
1672 int i;
1673
1674 ENTER;
1675
1676 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1677
1678 if (ioa_cfg->sdt_state != GET_DUMP) {
1679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1680 return;
1681 }
1682
1683 start_addr = readl(ioa_cfg->ioa_mailbox);
1684
1685 if (!ipr_sdt_is_fmt2(start_addr)) {
1686 dev_err(&ioa_cfg->pdev->dev,
1687 "Invalid dump table format: %lx\n", start_addr);
1688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1689 return;
1690 }
1691
1692 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1693
1694 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1695
1696 /* Initialize the overall dump header */
1697 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1698 driver_dump->hdr.num_entries = 1;
1699 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1700 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1701 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1702 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1703
1704 ipr_dump_version_data(ioa_cfg, driver_dump);
1705 ipr_dump_location_data(ioa_cfg, driver_dump);
1706 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1707 ipr_dump_trace_data(ioa_cfg, driver_dump);
1708
1709 /* Update dump_header */
1710 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1711
1712 /* IOA Dump entry */
1713 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1714 ioa_dump->format = IPR_SDT_FMT2;
1715 ioa_dump->hdr.len = 0;
1716 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1717 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1718
1719	/* First entries in sdt are actually a list of dump addresses and
1720	 lengths to gather the real dump data. sdt represents the pointer
1721	 to the IOA-generated dump table. Dump data will be extracted based
1722	 on entries in this table */
1723 sdt = &ioa_dump->sdt;
1724
1725 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1726 sizeof(struct ipr_sdt) / sizeof(__be32));
1727
1728 /* Smart Dump table is ready to use and the first entry is valid */
1729 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1730 dev_err(&ioa_cfg->pdev->dev,
1731 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1732 rc, be32_to_cpu(sdt->hdr.state));
1733 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1734 ioa_cfg->sdt_state = DUMP_OBTAINED;
1735 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1736 return;
1737 }
1738
1739 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1740
1741 if (num_entries > IPR_NUM_SDT_ENTRIES)
1742 num_entries = IPR_NUM_SDT_ENTRIES;
1743
1744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1745
1746 for (i = 0; i < num_entries; i++) {
1747 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1748 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1749 break;
1750 }
1751
1752 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1753 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1754 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1755 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1756
1757 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1758 bytes_to_copy = end_off - start_off;
1759 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1760 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1761 continue;
1762 }
1763
1764 /* Copy data from adapter to driver buffers */
1765 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1766 bytes_to_copy);
1767
1768 ioa_dump->hdr.len += bytes_copied;
1769
1770 if (bytes_copied != bytes_to_copy) {
1771 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1772 break;
1773 }
1774 }
1775 }
1776 }
1777
1778 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1779
1780 /* Update dump_header */
1781 driver_dump->hdr.len += ioa_dump->hdr.len;
1782 wmb();
1783 ioa_cfg->sdt_state = DUMP_OBTAINED;
1784 LEAVE;
1785}
1786
1787#else
1788#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1789#endif
1790
1791/**
1792 * ipr_release_dump - Free adapter dump memory
1793 * @kref: kref struct
1794 *
1795 * Return value:
1796 * nothing
1797 **/
1798static void ipr_release_dump(struct kref *kref)
1799{
1800 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
1801 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1802 unsigned long lock_flags = 0;
1803 int i;
1804
1805 ENTER;
1806 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1807 ioa_cfg->dump = NULL;
1808 ioa_cfg->sdt_state = INACTIVE;
1809 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1810
1811 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1812 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1813
1814 kfree(dump);
1815 LEAVE;
1816}
1817
1818/**
1819 * ipr_worker_thread - Worker thread
1820 * @data: ioa config struct
1821 *
1822 * Called at task level from a work thread. This function takes care
1823 * of adding and removing devices from the mid-layer as configuration
1824 * changes are detected by the adapter.
1825 *
1826 * Return value:
1827 * nothing
1828 **/
1829static void ipr_worker_thread(void *data)
1830{
1831 unsigned long lock_flags;
1832 struct ipr_resource_entry *res;
1833 struct scsi_device *sdev;
1834 struct ipr_dump *dump;
1835 struct ipr_ioa_cfg *ioa_cfg = data;
1836 u8 bus, target, lun;
1837 int did_work;
1838
1839 ENTER;
1840 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1841
1842 if (ioa_cfg->sdt_state == GET_DUMP) {
1843 dump = ioa_cfg->dump;
1844 if (!dump) {
1845 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1846 return;
1847 }
1848 kref_get(&dump->kref);
1849 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1850 ipr_get_ioa_dump(ioa_cfg, dump);
1851 kref_put(&dump->kref, ipr_release_dump);
1852
1853 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1854 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1855 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1857 return;
1858 }
1859
1860restart:
1861 do {
1862 did_work = 0;
1863 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1864 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1865 return;
1866 }
1867
1868 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1869 if (res->del_from_ml && res->sdev) {
1870 did_work = 1;
1871 sdev = res->sdev;
1872 if (!scsi_device_get(sdev)) {
1873 res->sdev = NULL;
1874 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1875 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1876 scsi_remove_device(sdev);
1877 scsi_device_put(sdev);
1878 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1879 }
1880 break;
1881 }
1882 }
1883 } while(did_work);
1884
1885 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1886 if (res->add_to_ml) {
1887 bus = res->cfgte.res_addr.bus;
1888 target = res->cfgte.res_addr.target;
1889 lun = res->cfgte.res_addr.lun;
1890 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1891 scsi_add_device(ioa_cfg->host, bus, target, lun);
1892 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1893 goto restart;
1894 }
1895 }
1896
1897 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1898 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1899 LEAVE;
1900}
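
/*
 * Concurrency note (a summary of the code above, not new behavior): both
 * the removal and the addition paths drop host_lock around the mid-layer
 * calls, so the used_res_q scan restarts from the top after every
 * scsi_remove_device()/scsi_add_device() in case the resource list changed
 * while the lock was released.
 */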
1901
1902#ifdef CONFIG_SCSI_IPR_TRACE
1903/**
1904 * ipr_read_trace - Dump the adapter trace
1905 * @kobj: kobject struct
1906 * @buf: buffer
1907 * @off: offset
1908 * @count: buffer size
1909 *
1910 * Return value:
1911 * number of bytes printed to buffer
1912 **/
1913static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1914 loff_t off, size_t count)
1915{
1916 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1917 struct Scsi_Host *shost = class_to_shost(cdev);
1918 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1919 unsigned long lock_flags = 0;
1920 int size = IPR_TRACE_SIZE;
1921 char *src = (char *)ioa_cfg->trace;
1922
1923 if (off > size)
1924 return 0;
1925 if (off + count > size) {
1926 size -= off;
1927 count = size;
1928 }
1929
1930 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1931 memcpy(buf, &src[off], count);
1932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1933 return count;
1934}
1935
1936static struct bin_attribute ipr_trace_attr = {
1937 .attr = {
1938 .name = "trace",
1939 .mode = S_IRUGO,
1940 },
1941 .size = 0,
1942 .read = ipr_read_trace,
1943};
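
/*
 * Hypothetical usage sketch: assuming this bin_attribute is registered on
 * the host's class device (done elsewhere in the driver) and the adapter
 * is host0, the trace buffer can be captured from user space with:
 *
 *	dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace bs=4k
 */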
1944#endif
1945
1946static const struct {
1947 enum ipr_cache_state state;
1948 char *name;
1949} cache_state [] = {
1950 { CACHE_NONE, "none" },
1951 { CACHE_DISABLED, "disabled" },
1952 { CACHE_ENABLED, "enabled" }
1953};
1954
1955/**
1956 * ipr_show_write_caching - Show the write caching attribute
1957 * @class_dev: class device struct
1958 * @buf: buffer
1959 *
1960 * Return value:
1961 * number of bytes printed to buffer
1962 **/
1963static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
1964{
1965 struct Scsi_Host *shost = class_to_shost(class_dev);
1966 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1967 unsigned long lock_flags = 0;
1968 int i, len = 0;
1969
1970 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1971 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
1972 if (cache_state[i].state == ioa_cfg->cache_state) {
1973 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
1974 break;
1975 }
1976 }
1977 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1978 return len;
1979}
1980
1981
1982/**
1983 * ipr_store_write_caching - Enable/disable adapter write cache
1984 * @class_dev: class_device struct
1985 * @buf: buffer
1986 * @count: buffer size
1987 *
1988 * This function will enable/disable adapter write cache.
1989 *
1990 * Return value:
1991 * count on success / other on failure
1992 **/
1993static ssize_t ipr_store_write_caching(struct class_device *class_dev,
1994 const char *buf, size_t count)
1995{
1996 struct Scsi_Host *shost = class_to_shost(class_dev);
1997 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1998 unsigned long lock_flags = 0;
1999 enum ipr_cache_state new_state = CACHE_INVALID;
2000 int i;
2001
2002 if (!capable(CAP_SYS_ADMIN))
2003 return -EACCES;
2004 if (ioa_cfg->cache_state == CACHE_NONE)
2005 return -EINVAL;
2006
2007 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2008 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2009 new_state = cache_state[i].state;
2010 break;
2011 }
2012 }
2013
2014 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2015 return -EINVAL;
2016
2017 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2018 if (ioa_cfg->cache_state == new_state) {
2019 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2020 return count;
2021 }
2022
2023 ioa_cfg->cache_state = new_state;
2024 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2025 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2026 if (!ioa_cfg->in_reset_reload)
2027 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2028 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2029 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2030
2031 return count;
2032}
2033
2034static struct class_device_attribute ipr_ioa_cache_attr = {
2035 .attr = {
2036 .name = "write_cache",
2037 .mode = S_IRUGO | S_IWUSR,
2038 },
2039 .show = ipr_show_write_caching,
2040 .store = ipr_store_write_caching
2041};
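
/*
 * Example (host number is hypothetical): query and change the write cache
 * setting. A store triggers a normal-shutdown adapter reset, so the write
 * blocks until the reset/reload sequence completes:
 *
 *	cat /sys/class/scsi_host/host0/write_cache
 *	echo disabled > /sys/class/scsi_host/host0/write_cache
 */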
2042
2043/**
2044 * ipr_show_fw_version - Show the firmware version
2045 * @class_dev: class device struct
2046 * @buf: buffer
2047 *
2048 * Return value:
2049 * number of bytes printed to buffer
2050 **/
2051static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2052{
2053 struct Scsi_Host *shost = class_to_shost(class_dev);
2054 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2055 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2056 unsigned long lock_flags = 0;
2057 int len;
2058
2059 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2060 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2061 ucode_vpd->major_release, ucode_vpd->card_type,
2062 ucode_vpd->minor_release[0],
2063 ucode_vpd->minor_release[1]);
2064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2065 return len;
2066}
2067
2068static struct class_device_attribute ipr_fw_version_attr = {
2069 .attr = {
2070 .name = "fw_version",
2071 .mode = S_IRUGO,
2072 },
2073 .show = ipr_show_fw_version,
2074};
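
/*
 * The version string is four hex bytes taken straight from inquiry page 3
 * VPD: for instance, a (made-up) value of "020A0B0C" would decode as major
 * release 0x02, card type 0x0A and minor release bytes 0x0B 0x0C.
 * Hypothetical read:
 *
 *	cat /sys/class/scsi_host/host0/fw_version
 */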
2075
2076/**
2077 * ipr_show_log_level - Show the adapter's error logging level
2078 * @class_dev: class device struct
2079 * @buf: buffer
2080 *
2081 * Return value:
2082 * number of bytes printed to buffer
2083 **/
2084static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2085{
2086 struct Scsi_Host *shost = class_to_shost(class_dev);
2087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2088 unsigned long lock_flags = 0;
2089 int len;
2090
2091 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2092 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2093 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2094 return len;
2095}
2096
2097/**
2098 * ipr_store_log_level - Change the adapter's error logging level
2099 * @class_dev: class device struct
2100 * @buf: buffer
 * @count: buffer size
2101 *
2102 * Return value:
2103 * number of bytes consumed from the buffer
2104 **/
2105static ssize_t ipr_store_log_level(struct class_device *class_dev,
2106 const char *buf, size_t count)
2107{
2108 struct Scsi_Host *shost = class_to_shost(class_dev);
2109 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2110 unsigned long lock_flags = 0;
2111
2112 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2113 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2114 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2115 return strlen(buf);
2116}
2117
2118static struct class_device_attribute ipr_log_level_attr = {
2119 .attr = {
2120 .name = "log_level",
2121 .mode = S_IRUGO | S_IWUSR,
2122 },
2123 .show = ipr_show_log_level,
2124 .store = ipr_store_log_level
2125};
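
/*
 * Hypothetical usage: the value is parsed as a base 10 integer, so raising
 * the error logging level for host0 is simply:
 *
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 */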
2126
2127/**
2128 * ipr_store_diagnostics - IOA Diagnostics interface
2129 * @class_dev: class_device struct
2130 * @buf: buffer
2131 * @count: buffer size
2132 *
2133 * This function will reset the adapter and wait a reasonable
2134 * amount of time for any errors that the adapter might log.
2135 *
2136 * Return value:
2137 * count on success / other on failure
2138 **/
2139static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2140 const char *buf, size_t count)
2141{
2142 struct Scsi_Host *shost = class_to_shost(class_dev);
2143 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2144 unsigned long lock_flags = 0;
2145 int rc = count;
2146
2147 if (!capable(CAP_SYS_ADMIN))
2148 return -EACCES;
2149
2150 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2151 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2152 ioa_cfg->errors_logged = 0;
2153 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2154
2155 if (ioa_cfg->in_reset_reload) {
2156 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2157 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2158
2159 /* Wait for a second for any errors to be logged */
2160 msleep(1000);
2161 } else {
2162 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2163 return -EIO;
2164 }
2165
2166 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2167 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2168 rc = -EIO;
2169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2170
2171 return rc;
2172}
2173
2174static struct class_device_attribute ipr_diagnostics_attr = {
2175 .attr = {
2176 .name = "run_diagnostics",
2177 .mode = S_IWUSR,
2178 },
2179 .store = ipr_store_diagnostics
2180};
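
/*
 * Example (hypothetical host number): any write kicks off a diagnostic
 * reset; a successful write means the adapter reset cleanly and logged no
 * errors within the one-second grace period:
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */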
2181
2182/**
2183 * ipr_show_adapter_state - Show the adapter's state
2184 * @class_dev: class device struct
2185 * @buf: buffer
2186 *
2187 * Return value:
2188 * number of bytes printed to buffer
2189 **/
2190static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2191{
2192 struct Scsi_Host *shost = class_to_shost(class_dev);
2193 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2194 unsigned long lock_flags = 0;
2195 int len;
2196
2197 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2198 if (ioa_cfg->ioa_is_dead)
2199 len = snprintf(buf, PAGE_SIZE, "offline\n");
2200 else
2201 len = snprintf(buf, PAGE_SIZE, "online\n");
2202 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2203 return len;
2204}
2205
2206/**
2207 * ipr_store_adapter_state - Change adapter state
2208 * @class_dev: class_device struct
2209 * @buf: buffer
2210 * @count: buffer size
2211 *
2212 * This function will change the adapter's state.
2213 *
2214 * Return value:
2215 * count on success / other on failure
2216 **/
2217static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2218 const char *buf, size_t count)
2219{
2220 struct Scsi_Host *shost = class_to_shost(class_dev);
2221 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2222 unsigned long lock_flags;
2223 int result = count;
2224
2225 if (!capable(CAP_SYS_ADMIN))
2226 return -EACCES;
2227
2228 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2229 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2230 ioa_cfg->ioa_is_dead = 0;
2231 ioa_cfg->reset_retries = 0;
2232 ioa_cfg->in_ioa_bringdown = 0;
2233 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2234 }
2235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2236 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2237
2238 return result;
2239}
2240
2241static struct class_device_attribute ipr_ioa_state_attr = {
2242 .attr = {
2243 .name = "state",
2244 .mode = S_IRUGO | S_IWUSR,
2245 },
2246 .show = ipr_show_adapter_state,
2247 .store = ipr_store_adapter_state
2248};
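
/*
 * Hypothetical usage: only the "online" transition on a dead adapter is
 * acted upon; any other write simply waits for a reset in progress:
 *
 *	cat /sys/class/scsi_host/host0/state
 *	echo online > /sys/class/scsi_host/host0/state
 */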
2249
2250/**
2251 * ipr_store_reset_adapter - Reset the adapter
2252 * @class_dev: class_device struct
2253 * @buf: buffer
2254 * @count: buffer size
2255 *
2256 * This function will reset the adapter.
2257 *
2258 * Return value:
2259 * count on success / other on failure
2260 **/
2261static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2262 const char *buf, size_t count)
2263{
2264 struct Scsi_Host *shost = class_to_shost(class_dev);
2265 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2266 unsigned long lock_flags;
2267 int result = count;
2268
2269 if (!capable(CAP_SYS_ADMIN))
2270 return -EACCES;
2271
2272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2273 if (!ioa_cfg->in_reset_reload)
2274 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2276 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2277
2278 return result;
2279}
2280
2281static struct class_device_attribute ipr_ioa_reset_attr = {
2282 .attr = {
2283 .name = "reset_host",
2284 .mode = S_IWUSR,
2285 },
2286 .store = ipr_store_reset_adapter
2287};
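
/*
 * Hypothetical usage: the written value is ignored; any write requests a
 * normal-shutdown reset and returns once reset/reload has finished:
 *
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 */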
2288
2289/**
2290 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2291 * @buf_len: buffer length
2292 *
2293 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2294 * list to use for microcode download
2295 *
2296 * Return value:
2297 * pointer to sglist / NULL on failure
2298 **/
2299static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2300{
2301 int sg_size, order, bsize_elem, num_elem, i, j;
2302 struct ipr_sglist *sglist;
2303 struct scatterlist *scatterlist;
2304 struct page *page;
2305
2306 /* Get the minimum size per scatter/gather element */
2307 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2308
2309 /* Get the actual size per element */
2310 order = get_order(sg_size);
2311
2312 /* Determine the actual number of bytes per element */
2313 bsize_elem = PAGE_SIZE * (1 << order);
2314
2315 /* Determine the actual number of sg entries needed */
2316 if (buf_len % bsize_elem)
2317 num_elem = (buf_len / bsize_elem) + 1;
2318 else
2319 num_elem = buf_len / bsize_elem;
2320
2321 /* Allocate a scatter/gather list for the DMA */
2322 sglist = kzalloc(sizeof(struct ipr_sglist) +
2323 (sizeof(struct scatterlist) * (num_elem - 1)),
2324 GFP_KERNEL);
2325
2326 if (sglist == NULL) {
2327 ipr_trace;
2328 return NULL;
2329 }
2330
2331 scatterlist = sglist->scatterlist;
2332
2333 sglist->order = order;
2334 sglist->num_sg = num_elem;
2335
2336 /* Allocate a bunch of sg elements */
2337 for (i = 0; i < num_elem; i++) {
2338 page = alloc_pages(GFP_KERNEL, order);
2339 if (!page) {
2340 ipr_trace;
2341
2342 /* Free up what we already allocated */
2343 for (j = i - 1; j >= 0; j--)
2344 __free_pages(scatterlist[j].page, order);
2345 kfree(sglist);
2346 return NULL;
2347 }
2348
2349 scatterlist[i].page = page;
2350 }
2351
2352 return sglist;
2353}
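
/*
 * Sizing sketch for the allocator above (illustrative numbers, assuming 4K
 * pages and an IPR_MAX_SGLIST of 64): a 4MB image gives sg_size = 4MB / 63
 * ~= 66K, get_order() rounds that up to order 5 (128K pages), so bsize_elem
 * is 128K and exactly 32 scatter/gather elements are allocated.
 */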
2354
2355/**
2356 * ipr_free_ucode_buffer - Frees a microcode download buffer
2357 * @p_dnld: scatter/gather list pointer
2358 *
2359 * Free a DMA'able ucode download buffer previously allocated with
2360 * ipr_alloc_ucode_buffer
2361 *
2362 * Return value:
2363 * nothing
2364 **/
2365static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2366{
2367 int i;
2368
2369 for (i = 0; i < sglist->num_sg; i++)
2370 __free_pages(sglist->scatterlist[i].page, sglist->order);
2371
2372 kfree(sglist);
2373}
2374
2375/**
2376 * ipr_copy_ucode_buffer - Copy the microcode image into the sg buffer
2377 * @sglist: scatter/gather list pointer
2378 * @buffer: buffer pointer
2379 * @len: buffer length
2380 *
2381 * Copy a microcode image from the firmware layer's buffer into the
2382 * scatter/gather buffer allocated by ipr_alloc_ucode_buffer
2383 *
2384 * Return value:
2385 * 0 on success / other on failure
2386 **/
2387static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2388 u8 *buffer, u32 len)
2389{
2390 int bsize_elem, i, result = 0;
2391 struct scatterlist *scatterlist;
2392 void *kaddr;
2393
2394 /* Determine the actual number of bytes per element */
2395 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2396
2397 scatterlist = sglist->scatterlist;
2398
2399 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2400 kaddr = kmap(scatterlist[i].page);
2401 memcpy(kaddr, buffer, bsize_elem);
2402 kunmap(scatterlist[i].page);
2403
2404 scatterlist[i].length = bsize_elem;
2405
2406 if (result != 0) {
2407 ipr_trace;
2408 return result;
2409 }
2410 }
2411
2412 if (len % bsize_elem) {
2413 kaddr = kmap(scatterlist[i].page);
2414 memcpy(kaddr, buffer, len % bsize_elem);
2415 kunmap(scatterlist[i].page);
2416
2417 scatterlist[i].length = len % bsize_elem;
2418 }
2419
2420 sglist->buffer_len = len;
2421 return result;
2422}
2423
2424/**
2425 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2426 * @ipr_cmd: ipr command struct
2427 * @sglist: scatter/gather list
2428 *
2429 * Builds a microcode download IOA data list (IOADL).
2430 *
2431 **/
2432static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2433 struct ipr_sglist *sglist)
2434{
2435 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2436 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2437 struct scatterlist *scatterlist = sglist->scatterlist;
2438 int i;
2439
2440 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2441 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2442 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2443 ioarcb->write_ioadl_len =
2444 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2445
2446 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2447 ioadl[i].flags_and_data_len =
2448 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2449 ioadl[i].address =
2450 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2451 }
2452
2453 ioadl[i-1].flags_and_data_len |=
2454 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2455}
2456
2457/**
2458 * ipr_update_ioa_ucode - Update IOA's microcode
2459 * @ioa_cfg: ioa config struct
2460 * @sglist: scatter/gather list
2461 *
2462 * Initiate an adapter reset to update the IOA's microcode
2463 *
2464 * Return value:
2465 * 0 on success / -EIO on failure
2466 **/
2467static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2468 struct ipr_sglist *sglist)
2469{
2470 unsigned long lock_flags;
2471
2472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2473
2474 if (ioa_cfg->ucode_sglist) {
2475 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2476 dev_err(&ioa_cfg->pdev->dev,
2477 "Microcode download already in progress\n");
2478 return -EIO;
2479 }
2480
2481 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2482 sglist->num_sg, DMA_TO_DEVICE);
2483
2484 if (!sglist->num_dma_sg) {
2485 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2486 dev_err(&ioa_cfg->pdev->dev,
2487 "Failed to map microcode download buffer!\n");
2488 return -EIO;
2489 }
2490
2491 ioa_cfg->ucode_sglist = sglist;
2492 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2493 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2494 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2495
2496 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2497 ioa_cfg->ucode_sglist = NULL;
2498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2499 return 0;
2500}
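
/*
 * Note: no data is sent to the adapter here. Stashing the mapped sglist in
 * ioa_cfg->ucode_sglist makes it visible to the reset job started by
 * ipr_initiate_ioa_reset(), which performs the actual microcode download as
 * one of its stages before the wait_event() above is woken.
 */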
2501
2502/**
2503 * ipr_store_update_fw - Update the firmware on the adapter
2504 * @class_dev: class_device struct
2505 * @buf: buffer
2506 * @count: buffer size
2507 *
2508 * This function will update the firmware on the adapter.
2509 *
2510 * Return value:
2511 * count on success / other on failure
2512 **/
2513static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2514 const char *buf, size_t count)
2515{
2516 struct Scsi_Host *shost = class_to_shost(class_dev);
2517 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2518 struct ipr_ucode_image_header *image_hdr;
2519 const struct firmware *fw_entry;
2520 struct ipr_sglist *sglist;
2521 char fname[100];
2522 char *src;
2523 int len, result, dnld_size;
2524
2525 if (!capable(CAP_SYS_ADMIN))
2526 return -EACCES;
2527
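	/* the name is presumably written as "echo <fname> > update_fw", so
	   chop the trailing newline off before passing it to request_firmware */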
2528 len = snprintf(fname, 99, "%s", buf);
2529 fname[len-1] = '\0';
2530
2531 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2532 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2533 return -EIO;
2534 }
2535
2536 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2537
2538 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2539 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2540 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2541 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2542 release_firmware(fw_entry);
2543 return -EINVAL;
2544 }
2545
2546 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2547 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2548 sglist = ipr_alloc_ucode_buffer(dnld_size);
2549
2550 if (!sglist) {
2551 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2552 release_firmware(fw_entry);
2553 return -ENOMEM;
2554 }
2555
2556 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2557
2558 if (result) {
2559 dev_err(&ioa_cfg->pdev->dev,
2560 "Microcode buffer copy to DMA buffer failed\n");
2561 goto out;
2562 }
2563
2564 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2565
2566 if (!result)
2567 result = count;
2568out:
2569 ipr_free_ucode_buffer(sglist);
2570 release_firmware(fw_entry);
2571 return result;
2572}
2573
2574static struct class_device_attribute ipr_update_fw_attr = {
2575 .attr = {
2576 .name = "update_fw",
2577 .mode = S_IWUSR,
2578 },
2579 .store = ipr_store_update_fw
2580};
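
/*
 * Hypothetical usage: write the name of a microcode image file, which
 * request_firmware() then resolves through the firmware loading
 * infrastructure (typically files under /lib/firmware):
 *
 *	echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */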
2581
2582static struct class_device_attribute *ipr_ioa_attrs[] = {
2583 &ipr_fw_version_attr,
2584 &ipr_log_level_attr,
2585 &ipr_diagnostics_attr,
2586 &ipr_ioa_state_attr,
2587 &ipr_ioa_reset_attr,
2588 &ipr_update_fw_attr,
2589 &ipr_ioa_cache_attr,
2590 NULL,
2591};
2592
2593#ifdef CONFIG_SCSI_IPR_DUMP
2594/**
2595 * ipr_read_dump - Dump the adapter
2596 * @kobj: kobject struct
2597 * @buf: buffer
2598 * @off: offset
2599 * @count: buffer size
2600 *
2601 * Return value:
2602 * number of bytes printed to buffer
2603 **/
2604static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2605 loff_t off, size_t count)
2606{
2607 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2608 struct Scsi_Host *shost = class_to_shost(cdev);
2609 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2610 struct ipr_dump *dump;
2611 unsigned long lock_flags = 0;
2612 char *src;
2613 int len;
2614 size_t rc = count;
2615
2616 if (!capable(CAP_SYS_ADMIN))
2617 return -EACCES;
2618
2619 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2620 dump = ioa_cfg->dump;
2621
2622 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2624 return 0;
2625 }
2626 kref_get(&dump->kref);
2627 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2628
2629 if (off > dump->driver_dump.hdr.len) {
2630 kref_put(&dump->kref, ipr_release_dump);
2631 return 0;
2632 }
2633
2634 if (off + count > dump->driver_dump.hdr.len) {
2635 count = dump->driver_dump.hdr.len - off;
2636 rc = count;
2637 }
2638
2639 if (count && off < sizeof(dump->driver_dump)) {
2640 if (off + count > sizeof(dump->driver_dump))
2641 len = sizeof(dump->driver_dump) - off;
2642 else
2643 len = count;
2644 src = (u8 *)&dump->driver_dump + off;
2645 memcpy(buf, src, len);
2646 buf += len;
2647 off += len;
2648 count -= len;
2649 }
2650
2651 off -= sizeof(dump->driver_dump);
2652
2653 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2654 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2655 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2656 else
2657 len = count;
2658 src = (u8 *)&dump->ioa_dump + off;
2659 memcpy(buf, src, len);
2660 buf += len;
2661 off += len;
2662 count -= len;
2663 }
2664
2665 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2666
2667 while (count) {
2668 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2669 len = PAGE_ALIGN(off) - off;
2670 else
2671 len = count;
2672 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2673 src += off & ~PAGE_MASK;
2674 memcpy(buf, src, len);
2675 buf += len;
2676 off += len;
2677 count -= len;
2678 }
2679
2680 kref_put(&dump->kref, ipr_release_dump);
2681 return rc;
2682}
2683
2684/**
2685 * ipr_alloc_dump - Prepare for adapter dump
2686 * @ioa_cfg: ioa config struct
2687 *
2688 * Return value:
2689 * 0 on success / other on failure
2690 **/
2691static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2692{
2693 struct ipr_dump *dump;
2694 unsigned long lock_flags = 0;
2695
2696 ENTER;
2697 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2698
2699 if (!dump) {
2700 ipr_err("Dump memory allocation failed\n");
2701 return -ENOMEM;
2702 }
2703
2704 kref_init(&dump->kref);
2705 dump->ioa_cfg = ioa_cfg;
2706
2707 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2708
2709 if (INACTIVE != ioa_cfg->sdt_state) {
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2711 kfree(dump);
2712 return 0;
2713 }
2714
2715 ioa_cfg->dump = dump;
2716 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2717 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2718 ioa_cfg->dump_taken = 1;
2719 schedule_work(&ioa_cfg->work_q);
2720 }
2721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2722
2723 LEAVE;
2724 return 0;
2725}
2726
2727/**
2728 * ipr_free_dump - Free adapter dump memory
2729 * @ioa_cfg: ioa config struct
2730 *
2731 * Return value:
2732 * 0 on success / other on failure
2733 **/
2734static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2735{
2736 struct ipr_dump *dump;
2737 unsigned long lock_flags = 0;
2738
2739 ENTER;
2740
2741 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2742 dump = ioa_cfg->dump;
2743 if (!dump) {
2744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2745 return 0;
2746 }
2747
2748 ioa_cfg->dump = NULL;
2749 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2750
2751 kref_put(&dump->kref, ipr_release_dump);
2752
2753 LEAVE;
2754 return 0;
2755}
2756
2757/**
2758 * ipr_write_dump - Setup dump state of adapter
2759 * @kobj: kobject struct
2760 * @buf: buffer
2761 * @off: offset
2762 * @count: buffer size
2763 *
2764 * Return value:
2765 * number of bytes printed to buffer
2766 **/
2767static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2768 loff_t off, size_t count)
2769{
2770 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2771 struct Scsi_Host *shost = class_to_shost(cdev);
2772 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2773 int rc;
2774
2775 if (!capable(CAP_SYS_ADMIN))
2776 return -EACCES;
2777
2778 if (buf[0] == '1')
2779 rc = ipr_alloc_dump(ioa_cfg);
2780 else if (buf[0] == '0')
2781 rc = ipr_free_dump(ioa_cfg);
2782 else
2783 return -EINVAL;
2784
2785 if (rc)
2786 return rc;
2787 else
2788 return count;
2789}
2790
2791static struct bin_attribute ipr_dump_attr = {
2792 .attr = {
2793 .name = "dump",
2794 .mode = S_IRUSR | S_IWUSR,
2795 },
2796 .size = 0,
2797 .read = ipr_read_dump,
2798 .write = ipr_write_dump
2799};
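
/*
 * Dump collection sketch (hypothetical host number): writing '1' allocates
 * the dump buffers and arms WAIT_FOR_DUMP, the dump is read back after the
 * worker thread has driven sdt_state to DUMP_OBTAINED, and writing '0'
 * releases the memory:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump
 *	dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr_dump bs=4k
 *	echo 0 > /sys/class/scsi_host/host0/dump
 */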
2800#else
2801static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
2802#endif
2803
2804/**
2805 * ipr_change_queue_depth - Change the device's queue depth
2806 * @sdev: scsi device struct
2807 * @qdepth: depth to set
2808 *
2809 * Return value:
2810 * actual depth set
2811 **/
2812static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
2813{
2814 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2815 return sdev->queue_depth;
2816}
2817
2818/**
2819 * ipr_change_queue_type - Change the device's queue type
2820 * @sdev: scsi device struct
2821 * @tag_type: type of tags to use
2822 *
2823 * Return value:
2824 * actual queue type set
2825 **/
2826static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2827{
2828 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2829 struct ipr_resource_entry *res;
2830 unsigned long lock_flags = 0;
2831
2832 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2833 res = (struct ipr_resource_entry *)sdev->hostdata;
2834
2835 if (res) {
2836 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2837 /*
2838 * We don't bother quiescing the device here since the
2839 * adapter firmware does it for us.
2840 */
2841 scsi_set_tag_type(sdev, tag_type);
2842
2843 if (tag_type)
2844 scsi_activate_tcq(sdev, sdev->queue_depth);
2845 else
2846 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2847 } else
2848 tag_type = 0;
2849 } else
2850 tag_type = 0;
2851
2852 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2853 return tag_type;
2854}
2855
2856/**
2857 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2858 * @dev: device struct
2859 * @buf: buffer
2860 *
2861 * Return value:
2862 * number of bytes printed to buffer
2863 **/
2864static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
2865{
2866 struct scsi_device *sdev = to_scsi_device(dev);
2867 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2868 struct ipr_resource_entry *res;
2869 unsigned long lock_flags = 0;
2870 ssize_t len = -ENXIO;
2871
2872 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2873 res = (struct ipr_resource_entry *)sdev->hostdata;
2874 if (res)
2875 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2876 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2877 return len;
2878}
2879
2880static struct device_attribute ipr_adapter_handle_attr = {
2881 .attr = {
2882 .name = "adapter_handle",
2883 .mode = S_IRUSR,
2884 },
2885 .show = ipr_show_adapter_handle
2886};
2887
2888static struct device_attribute *ipr_dev_attrs[] = {
2889 &ipr_adapter_handle_attr,
2890 NULL,
2891};
2892
2893/**
2894 * ipr_biosparam - Return the HSC mapping
2895 * @sdev: scsi device struct
2896 * @block_device: block device pointer
2897 * @capacity: capacity of the device
2898 * @parm: Array containing returned HSC values.
2899 *
2900 * This function generates the HSC parms that fdisk uses.
2901 * We want to make sure we return something that places partitions
2902 * on 4k boundaries for best performance with the IOA.
2903 *
2904 * Return value:
2905 * 0 on success
2906 **/
2907static int ipr_biosparam(struct scsi_device *sdev,
2908 struct block_device *block_device,
2909 sector_t capacity, int *parm)
2910{
2911 int heads, sectors;
2912 sector_t cylinders;
2913
2914 heads = 128;
2915 sectors = 32;
2916
2917 cylinders = capacity;
2918 sector_div(cylinders, (128 * 32));
2919
2920 /* return result */
2921 parm[0] = heads;
2922 parm[1] = sectors;
2923 parm[2] = cylinders;
2924
2925 return 0;
2926}
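
/*
 * Worked example: with the fixed 128 heads and 32 sectors, every cylinder
 * is 128 * 32 = 4096 sectors (2MB with 512-byte sectors), so a disk of
 * 71687372 sectors reports 71687372 / 4096 = 17501 cylinders and any
 * cylinder-aligned partition starts on a 4k boundary.
 */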
2927
2928/**
2929 * ipr_slave_destroy - Unconfigure a SCSI device
2930 * @sdev: scsi device struct
2931 *
2932 * Return value:
2933 * nothing
2934 **/
2935static void ipr_slave_destroy(struct scsi_device *sdev)
2936{
2937 struct ipr_resource_entry *res;
2938 struct ipr_ioa_cfg *ioa_cfg;
2939 unsigned long lock_flags = 0;
2940
2941 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2942
2943 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2944 res = (struct ipr_resource_entry *) sdev->hostdata;
2945 if (res) {
2946 sdev->hostdata = NULL;
2947 res->sdev = NULL;
2948 }
2949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2950}
2951
2952/**
2953 * ipr_slave_configure - Configure a SCSI device
2954 * @sdev: scsi device struct
2955 *
2956 * This function configures the specified scsi device.
2957 *
2958 * Return value:
2959 * 0 on success
2960 **/
2961static int ipr_slave_configure(struct scsi_device *sdev)
2962{
2963 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2964 struct ipr_resource_entry *res;
2965 unsigned long lock_flags = 0;
2966
2967 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2968 res = sdev->hostdata;
2969 if (res) {
2970 if (ipr_is_af_dasd_device(res))
2971 sdev->type = TYPE_RAID;
2972 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
2973 sdev->scsi_level = 4;
2974 sdev->no_uld_attach = 1;
2975 }
2976 if (ipr_is_vset_device(res)) {
2977 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2978 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
2979 }
2980 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2981 sdev->allow_restart = 1;
2982 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
2983 }
2984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2985 return 0;
2986}
2987
2988/**
2989 * ipr_slave_alloc - Prepare for commands to a device.
2990 * @sdev: scsi device struct
2991 *
2992 * This function saves a pointer to the resource entry
2993 * in the scsi device struct if the device exists. We
2994 * can then use this pointer in ipr_queuecommand when
2995 * handling new commands.
2996 *
2997 * Return value:
2998 * 0 on success / -ENXIO if device does not exist
2999 **/
3000static int ipr_slave_alloc(struct scsi_device *sdev)
3001{
3002 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3003 struct ipr_resource_entry *res;
3004 unsigned long lock_flags;
3005 int rc = -ENXIO;
3006
3007 sdev->hostdata = NULL;
3008
3009 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3010
3011 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3012 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3013 (res->cfgte.res_addr.target == sdev->id) &&
3014 (res->cfgte.res_addr.lun == sdev->lun)) {
3015 res->sdev = sdev;
3016 res->add_to_ml = 0;
3017 res->in_erp = 0;
3018 sdev->hostdata = res;
3019 res->needs_sync_complete = 1;
3020 rc = 0;
3021 break;
3022 }
3023 }
3024
3025 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3026
3027 return rc;
3028}
3029
3030/**
3031 * ipr_eh_host_reset - Reset the host adapter
3032 * @scsi_cmd: scsi command struct
3033 *
3034 * Return value:
3035 * SUCCESS / FAILED
3036 **/
3037static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3038{
3039 struct ipr_ioa_cfg *ioa_cfg;
3040 int rc;
3041
3042 ENTER;
3043 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3044
3045 dev_err(&ioa_cfg->pdev->dev,
3046 "Adapter being reset as a result of error recovery.\n");
3047
3048 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3049 ioa_cfg->sdt_state = GET_DUMP;
3050
3051 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3052
3053 LEAVE;
3054 return rc;
3055}
3056
3057static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3058{
3059 int rc;
3060
3061 spin_lock_irq(cmd->device->host->host_lock);
3062 rc = __ipr_eh_host_reset(cmd);
3063 spin_unlock_irq(cmd->device->host->host_lock);
3064
3065 return rc;
3066}
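
/*
 * Locking note: the SCSI error handler invokes these entry points without
 * the host lock held, so the thin ipr_eh_* wrappers take host_lock around
 * the __ipr_eh_* bodies that expect it.
 */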
3067
3068/**
3069 * ipr_eh_dev_reset - Reset the device
3070 * @scsi_cmd: scsi command struct
3071 *
3072 * This function issues a device reset to the affected device.
3073 * A LUN reset will be sent to the device first. If that does
3074 * not work, a target reset will be sent.
3075 *
3076 * Return value:
3077 * SUCCESS / FAILED
3078 **/
3079static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3080{
3081 struct ipr_cmnd *ipr_cmd;
3082 struct ipr_ioa_cfg *ioa_cfg;
3083 struct ipr_resource_entry *res;
3084 struct ipr_cmd_pkt *cmd_pkt;
3085 u32 ioasc;
3086
3087 ENTER;
3088 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3089 res = scsi_cmd->device->hostdata;
3090
3091 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3092 return FAILED;
3093
3094 /*
3095 * If we are currently going through reset/reload, return failed. This will force the
3096 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3097 * reset to complete
3098 */
3099 if (ioa_cfg->in_reset_reload)
3100 return FAILED;
3101 if (ioa_cfg->ioa_is_dead)
3102 return FAILED;
3103
3104 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3105 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3106 if (ipr_cmd->scsi_cmd)
3107 ipr_cmd->done = ipr_scsi_eh_done;
3108 }
3109 }
3110
3111 res->resetting_device = 1;
3112
3113 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3114
3115 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3116 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3117 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3118 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3119
3120 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3121 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3122
3123 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3124
3125 res->resetting_device = 0;
3126
3127 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3128
3129 LEAVE;
3130 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3131}
3132
3133static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3134{
3135 int rc;
3136
3137 spin_lock_irq(cmd->device->host->host_lock);
3138 rc = __ipr_eh_dev_reset(cmd);
3139 spin_unlock_irq(cmd->device->host->host_lock);
3140
3141 return rc;
3142}
3143
3144/**
3145 * ipr_bus_reset_done - Op done function for bus reset.
3146 * @ipr_cmd: ipr command struct
3147 *
3148 * This function is the op done function for a bus reset
3149 *
3150 * Return value:
3151 * none
3152 **/
3153static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3154{
3155 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3156 struct ipr_resource_entry *res;
3157
3158 ENTER;
3159 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3160 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3161 sizeof(res->cfgte.res_handle))) {
3162 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3163 break;
3164 }
3165 }
3166
3167 /*
3168 * If abort has not completed, indicate the reset has, else call the
3169 * abort's done function to wake the sleeping eh thread
3170 */
3171 if (ipr_cmd->sibling->sibling)
3172 ipr_cmd->sibling->sibling = NULL;
3173 else
3174 ipr_cmd->sibling->done(ipr_cmd->sibling);
3175
3176 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3177 LEAVE;
3178}
3179
3180/**
3181 * ipr_abort_timeout - An abort task has timed out
3182 * @ipr_cmd: ipr command struct
3183 *
3184 * This function handles when an abort task times out. If this
3185 * happens we issue a bus reset since we have resources tied
3186 * up that must be freed before returning to the midlayer.
3187 *
3188 * Return value:
3189 * none
3190 **/
3191static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3192{
3193 struct ipr_cmnd *reset_cmd;
3194 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3195 struct ipr_cmd_pkt *cmd_pkt;
3196 unsigned long lock_flags = 0;
3197
3198 ENTER;
3199 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3200 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3202 return;
3203 }
3204
3205 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3206 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3207 ipr_cmd->sibling = reset_cmd;
3208 reset_cmd->sibling = ipr_cmd;
3209 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3210 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3211 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3212 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3213 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3214
3215 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3216 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3217 LEAVE;
3218}
3219
3220/**
3221 * ipr_cancel_op - Cancel specified op
3222 * @scsi_cmd: scsi command struct
3223 *
3224 * This function cancels specified op.
3225 *
3226 * Return value:
3227 * SUCCESS / FAILED
3228 **/
3229static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3230{
3231 struct ipr_cmnd *ipr_cmd;
3232 struct ipr_ioa_cfg *ioa_cfg;
3233 struct ipr_resource_entry *res;
3234 struct ipr_cmd_pkt *cmd_pkt;
3235 u32 ioasc;
3236 int op_found = 0;
3237
3238 ENTER;
3239 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3240 res = scsi_cmd->device->hostdata;
3241
3242 /* If we are currently going through reset/reload, return failed.
3243 * This will force the mid-layer to call ipr_eh_host_reset,
3244 * which will then go to sleep and wait for the reset to complete
3245 */
3246 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3247 return FAILED;
3248 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3249 return FAILED;
3250
3251 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3252 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3253 ipr_cmd->done = ipr_scsi_eh_done;
3254 op_found = 1;
3255 break;
3256 }
3257 }
3258
3259 if (!op_found)
3260 return SUCCESS;
3261
3262 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3263 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3264 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3265 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3266 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3267 ipr_cmd->u.sdev = scsi_cmd->device;
3268
3269 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3270 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3271 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3272
3273 /*
3274 * If the abort task timed out and we sent a bus reset, we will get
3275 * one the following responses to the abort
3276 */
3277 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3278 ioasc = 0;
3279 ipr_trace;
3280 }
3281
3282 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3283 res->needs_sync_complete = 1;
3284
3285 LEAVE;
3286 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3287}
3288
3289/**
3290 * ipr_eh_abort - Abort a single op
3291 * @scsi_cmd: scsi command struct
3292 *
3293 * Return value:
3294 * SUCCESS / FAILED
3295 **/
3296static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3297{
3298 unsigned long flags;
3299 int rc;
3300
3301 ENTER;
3302
3303 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3304 rc = ipr_cancel_op(scsi_cmd);
3305 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3306
3307 LEAVE;
3308 return rc;
3309}
3310
3311/**
3312 * ipr_handle_other_interrupt - Handle "other" interrupts
3313 * @ioa_cfg: ioa config struct
3314 * @int_reg: interrupt register
3315 *
3316 * Return value:
3317 * IRQ_NONE / IRQ_HANDLED
3318 **/
3319static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3320 volatile u32 int_reg)
3321{
3322 irqreturn_t rc = IRQ_HANDLED;
3323
3324 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3325 /* Mask the interrupt */
3326 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3327
3328 /* Clear the interrupt */
3329 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3330 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3331
3332 list_del(&ioa_cfg->reset_cmd->queue);
3333 del_timer(&ioa_cfg->reset_cmd->timer);
3334 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3335 } else {
3336 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3337 ioa_cfg->ioa_unit_checked = 1;
3338 else
3339 dev_err(&ioa_cfg->pdev->dev,
3340 "Permanent IOA failure. 0x%08X\n", int_reg);
3341
3342 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3343 ioa_cfg->sdt_state = GET_DUMP;
3344
3345 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3346 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3347 }
3348
3349 return rc;
3350}
3351
3352/**
3353 * ipr_isr - Interrupt service routine
3354 * @irq: irq number
3355 * @devp: pointer to ioa config struct
3356 * @regs: pt_regs struct
3357 *
3358 * Return value:
3359 * IRQ_NONE / IRQ_HANDLED
3360 **/
3361static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3362{
3363 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3364 unsigned long lock_flags = 0;
3365 volatile u32 int_reg, int_mask_reg;
3366 u32 ioasc;
3367 u16 cmd_index;
3368 struct ipr_cmnd *ipr_cmd;
3369 irqreturn_t rc = IRQ_NONE;
3370
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372
3373 /* If interrupts are disabled, ignore the interrupt */
3374 if (!ioa_cfg->allow_interrupts) {
3375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3376 return IRQ_NONE;
3377 }
3378
3379 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3380 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3381
3382 /* If an interrupt on the adapter did not occur, ignore it */
3383 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3385 return IRQ_NONE;
3386 }
3387
3388 while (1) {
3389 ipr_cmd = NULL;
3390
3391 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3392 ioa_cfg->toggle_bit) {
3393
3394 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3395 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3396
3397 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3398 ioa_cfg->errors_logged++;
3399 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3400
3401 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3402 ioa_cfg->sdt_state = GET_DUMP;
3403
3404 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3406 return IRQ_HANDLED;
3407 }
3408
3409 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3410
3411 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3412
3413 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3414
3415 list_del(&ipr_cmd->queue);
3416 del_timer(&ipr_cmd->timer);
3417 ipr_cmd->done(ipr_cmd);
3418
3419 rc = IRQ_HANDLED;
3420
3421 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3422 ioa_cfg->hrrq_curr++;
3423 } else {
3424 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3425 ioa_cfg->toggle_bit ^= 1u;
3426 }
3427 }
3428
3429 if (ipr_cmd != NULL) {
3430 /* Clear the PCI interrupt */
3431 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3432 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3433 } else
3434 break;
3435 }
3436
3437 if (unlikely(rc == IRQ_NONE))
3438 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3439
3440 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3441 return rc;
3442}
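
/*
 * HRRQ sketch: each response queue entry carries a toggle bit that the
 * adapter flips on every pass through the circular queue. An entry is only
 * valid while its toggle bit matches ioa_cfg->toggle_bit, and the driver
 * flips its own copy whenever hrrq_curr wraps back to hrrq_start, so it
 * always chases the current generation of completions.
 */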

/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i;
	struct scatterlist *sglist;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

	length = scsi_cmd->request_bufflen;

	if (length == 0)
		return 0;

	if (scsi_cmd->use_sg) {
		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
						 scsi_cmd->request_buffer,
						 scsi_cmd->use_sg,
						 scsi_cmd->sc_data_direction);

		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
			ioarcb->write_data_transfer_length = cpu_to_be32(length);
			ioarcb->write_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_READ;
			ioarcb->read_data_transfer_length = cpu_to_be32(length);
			ioarcb->read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
		}

		sglist = scsi_cmd->request_buffer;

		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
			ioadl[i].flags_and_data_len =
				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
			ioadl[i].address =
				cpu_to_be32(sg_dma_address(&sglist[i]));
		}

		if (likely(ipr_cmd->dma_use_sg)) {
			ioadl[i-1].flags_and_data_len |=
				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
			return 0;
		} else
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
	} else {
		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
			ioarcb->write_data_transfer_length = cpu_to_be32(length);
			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_READ;
			ioarcb->read_data_transfer_length = cpu_to_be32(length);
			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		}

		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
						     scsi_cmd->request_buffer, length,
						     scsi_cmd->sc_data_direction);

		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
			ipr_cmd->dma_use_sg = 1;
			ioadl[0].flags_and_data_len =
				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
			return 0;
		} else
			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
	}

	return -1;
}
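
/*
 * Illustrative sketch (not driver code): each IOADL descriptor packs
 * the direction flags and the element length into one big-endian word,
 * with the bus address in a second word.  With hypothetical lengths
 * and addresses, a two-element read list would be filled in as:
 *
 *	ioadl[0].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ | 0x1000);
 *	ioadl[0].address            = cpu_to_be32(0x80001000);
 *	ioadl[1].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ | 0x0200 |
 *						  IPR_IOADL_FLAGS_LAST);
 *	ioadl[1].address            = cpu_to_be32(0x80002000);
 *
 * Only the final descriptor carries IPR_IOADL_FLAGS_LAST, which is how
 * the adapter finds the end of the chain.
 */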

/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		}
	}

	return rc;
}

/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		ipr_sdev_err(scsi_cmd->device,
			     "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioasa *ioasa;

	ioarcb = &ipr_cmd->ioarcb;
	ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
}

/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_cmd->ioadl[0].flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
	ipr_cmd->ioadl[0].address =
		cpu_to_be32(ipr_cmd->sense_buffer_dma);

	ipr_cmd->ioarcb.read_ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ipr_cmd->ioarcb.read_data_transfer_length =
		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}

/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}

/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i;
	u16 data_len;
	u32 ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->ilid != 0)
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
		     ipr_error_table[error_index].error);

	if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
	    (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
		ipr_sdev_err(ipr_cmd->scsi_cmd->device,
			     "Device End state: %s Phase: %s\n",
			     ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
			     ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
	}

	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
		data_len = sizeof(struct ipr_ioasa);
	else
		data_len = be16_to_cpu(ioasa->ret_stat_len);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}

/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	u32 ioasc = be32_to_cpu(ioasa->ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
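
/*
 * Illustrative sketch (not driver code): for the common fixed-format
 * case above, a hypothetical medium error on LBA 0x00123456 that must
 * not be reallocated would yield a buffer beginning:
 *
 *	sense_buf[0]     = 0xF0;	// 0x70 fixed format | 0x80 info valid
 *	sense_buf[2]     = 0x03;	// sense key: MEDIUM ERROR
 *	sense_buf[3..6]  = 00 12 34 56;	// failing LBA, big-endian
 *	sense_buf[7]     = 6;		// additional sense length
 *	sense_buf[12..13]		// ASC/ASCQ taken from the IOASC
 *
 * The vset path instead builds 0x72 descriptor-format sense so the
 * 64-bit failing LBA fits in an information descriptor.
 */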

/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (ipr_is_gscsi(res))
		ipr_dump_ioasa(ioa_cfg, ipr_cmd);
	else
		ipr_gen_sense(ipr_cmd);

	switch (ioasc & IPR_IOASC_IOASC_MASK) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			ipr_erp_cancel_all(ipr_cmd);
			return;
		}
		res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res))
			res->needs_sync_complete = 1;
		break;
	}

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);
	} else
		ipr_erp_start(ioa_cfg, ipr_cmd);
}

/**
 * ipr_save_ioafp_mode_select - Save adapter's mode select data
 * @ioa_cfg:	ioa config struct
 * @scsi_cmd:	scsi command struct
 *
 * This function saves mode select data for the adapter to
 * use following an adapter reset.
 *
 * Return value:
 * 	0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
 **/
static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
				      struct scsi_cmnd *scsi_cmd)
{
	if (!ioa_cfg->saved_mode_pages) {
		ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
						    GFP_ATOMIC);
		if (!ioa_cfg->saved_mode_pages) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA mode select buffer allocation failed\n");
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}

	memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
	ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
	return 0;
}

/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:	done function
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset.
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->cfgte.res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
		rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);

	if (likely(rc == 0))
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	if (likely(rc == 0)) {
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;
}

/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 * 	pointer to buffer with description string
 **/
static const char * ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};

#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PV_NORTHSTAR,
	PV_PULSAR,
	PV_POWER4,
	PV_ICESTAR,
	PV_SSTAR,
	PV_POWER4p,
	PV_630,
	PV_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 * 	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	u8 rev_id;
	int i;

	if (ioa_cfg->type == 0x5702) {
		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
					 &rev_id) == PCIBIOS_SUCCESSFUL) {
			if (rev_id < 4) {
				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
					if (__is_processor(ipr_blocked_processors[i]))
						return 1;
				}
			}
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif

/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);

	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);

	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:		vendor product id struct
 *
 * Return value:
 * 	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}

/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
							sizeof(struct ipr_supported_device));
		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
					     offsetof(struct ipr_misc_cbs, supp_dev));
		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ioarcb->write_data_transfer_length =
			cpu_to_be32(sizeof(struct ipr_supported_device));

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}
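
/*
 * Illustrative sketch (not driver code): adapter bring-up runs as a
 * chain of job steps.  A step either finishes inline and returns
 * IPR_RC_JOB_CONTINUE, so the reset job immediately invokes the next
 * ->job_step, or fires an asynchronous request and returns
 * IPR_RC_JOB_RETURN, with the request's completion re-entering the
 * state machine.  A step that must run once per resource, as above,
 * re-arms itself (names hypothetical):
 *
 *	ipr_cmd->job_step = next_step;		// where to go when done
 *	list_for_each_entry_continue(res, ...) {
 *		// ...issue one request for this resource...
 *		ipr_cmd->job_step = this_step;	// revisit for the next one
 *		return IPR_RC_JOB_RETURN;
 *	}
 *	return IPR_RC_JOB_CONTINUE;		// list exhausted
 */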

/**
 * ipr_setup_write_cache - Disable write cache if needed
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the adapter's write cache to the desired setting
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);

	if (ioa_cfg->cache_state != CACHE_DISABLED)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 * 	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}
	return NULL;
}
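
/*
 * Worked example (illustrative, with hypothetical values): for a mode
 * sense header whose length byte is 35 and a block descriptor length
 * of 8, the page data above spans (35 + 1) - 4 - 8 = 24 bytes.  The
 * walk then strides from one page header to the next using
 * sizeof(struct ipr_mode_page_hdr) + page_length until the requested
 * page code is found or the 24 bytes are exhausted.
 */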

/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 * 	nothing
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}

/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}

/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 * 	none
 **/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
{
	int i, entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_bus_attributes *bus_attr;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	/* Loop for each device bus entry */
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
			dev_err(&ioa_cfg->pdev->dev,
				"Invalid resource address reported: 0x%08X\n",
				IPR_GET_PHYS_LOC(bus->res_addr));
			continue;
		}

		bus_attr = &ioa_cfg->bus_attr[i];
		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
		bus->bus_width = bus_attr->bus_width;
		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
		if (bus_attr->qas_enabled)
			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
		else
			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
	}
}

/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of Mode Select command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm, u32 dma_addr,
				  u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
}
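
/*
 * Illustrative sketch (not driver code): with parm 0x11, as used for
 * page 28 below, and a hypothetical 40-byte buffer, the resulting
 * MODE SELECT(6) CDB would read:
 *
 *	15 11 00 00 28 00
 *
 * i.e. opcode 0x15 with the Page Format (0x10) and Save Pages (0x01)
 * bits set in byte 1, and the transfer length 0x28 in byte 4.
 */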

/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ENTER;
	if (ioa_cfg->saved_mode_pages) {
		memcpy(mode_pages, ioa_cfg->saved_mode_pages,
		       ioa_cfg->saved_mode_page_len);
		length = ioa_cfg->saved_mode_page_len;
	} else {
		ipr_scsi_bus_speed_limit(ioa_cfg);
		ipr_check_term_power(ioa_cfg, mode_pages);
		ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
		length = mode_pages->hdr.length + 1;
		mode_pages->hdr.length = 0;
	}

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_setup_write_cache;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
}

/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry *cfgte;
	int found, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
		cfgte = &ioa_cfg->cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (!memcmp(&res->cfgte.res_addr,
				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res);
			res->add_to_ml = 1;
		}

		if (found)
			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->sdev->hostdata = NULL;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		} else {
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	}

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length =
		cpu_to_be32(sizeof(struct ipr_config_table));

	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	CDB byte 1 flags for the inquiry
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the response buffer
 * @xfer_len:	size of the response buffer
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);

	ioadl->address = cpu_to_be32(dma_addr);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}

/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}

/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;

	ENTER;

	if (!ipr_inquiry_page_supported(page0, 1))
		ioa_cfg->cache_state = CACHE_NONE;

	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->cmd_pkt.cdb[2] =
		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u32) ioa_cfg->host_rrq_dma) & 0xff;
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
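
/*
 * Worked example (illustrative, with a hypothetical address): if
 * host_rrq_dma were 0x1F40A000, the CDB above would carry it
 * big-endian in bytes 2-5 as 1F 40 A0 00, with the queue size
 * (sizeof(u32) * IPR_NUM_CMD_BLKS) big-endian in bytes 7-8.  The
 * adapter then DMAs one 32-bit response word per command block into
 * that buffer as ops complete.
 */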

/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}

/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
}

/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}

/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc)
		ipr_handle_log_data(ioa_cfg, hostrcb);
	else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_unblock_user_cfg_access(ioa_cfg->pdev);
	rc = pci_restore_state(ioa_cfg->pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_block_user_cfg_access(ioa_cfg->pdev);
	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	} else {
		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
5326
5327/**
5328 * ipr_reset_ucode_download_done - Microcode download completion
5329 * @ipr_cmd: ipr command struct
5330 *
5331 * Description: This function unmaps the microcode download buffer.
5332 *
5333 * Return value:
5334 * IPR_RC_JOB_CONTINUE
5335 **/
5336static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5337{
5338 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5339 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5340
5341 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5342 sglist->num_sg, DMA_TO_DEVICE);
5343
5344 ipr_cmd->job_step = ipr_reset_alert;
5345 return IPR_RC_JOB_CONTINUE;
5346}
5347
5348/**
5349 * ipr_reset_ucode_download - Download microcode to the adapter
5350 * @ipr_cmd: ipr command struct
5351 *
5352 * Description: This function checks to see if it there is microcode
5353 * to download to the adapter. If there is, a download is performed.
5354 *
5355 * Return value:
5356 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5357 **/
5358static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5359{
5360 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5361 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5362
5363 ENTER;
5364 ipr_cmd->job_step = ipr_reset_alert;
5365
5366 if (!sglist)
5367 return IPR_RC_JOB_CONTINUE;
5368
5369 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5370 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5371 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5372 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
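	/* Bytes 6-8 of the WRITE BUFFER CDB hold the 24-bit image length */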
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else
			timeout = IPR_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd: ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 * none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	unsigned long scratch = ipr_cmd->u.scratch;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

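	/*
	 * Walk the chain of job steps. A step that returns
	 * IPR_RC_JOB_RETURN has armed a timer or issued a command, and
	 * this function will be re-entered when that completes.
	 */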
	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			dev_err(&ioa_cfg->pdev->dev,
				"0x%02X failed with IOASC: 0x%08X\n",
				ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->u.scratch = scratch;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}

/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @job_step: first job step of reset job
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

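	/* Note: assumes the caller holds the host lock */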
	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg: ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg: ioa cfg struct
 *
 * Return value:
 * none
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->saved_mode_pages);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg: ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

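	/*
	 * Command blocks are handed to the adapter by bus address, so
	 * they are carved from a PCI DMA pool with 8 byte alignment.
	 */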
	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
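		/*
		 * The low two bits of a host RRQ entry hold the response
		 * and toggle bits, so the command index is stored shifted
		 * left by two.
		 */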
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg: ioa config struct
 * @host: scsi host struct
 * @pdev: PCI dev struct
 *
 * Return value:
 * none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

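	/*
	 * Translate the chip specific register offsets into addresses
	 * within the ioremapped register space.
	 */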
	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id: PCI device id struct
 *
 * Return value:
 * ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;
	return NULL;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
 * Return value:
 * 0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	u32 rc = PCIBIOS_SUCCESSFUL;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg: ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
 * Return value:
 * 0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
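	/* Expose the IOA itself as a SCSI device so utilities can send it commands */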
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev: pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_module_init(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);