/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 * Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static int ipr_auto_create = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(auto_create, ipr_auto_create, int, 0);
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
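
/*
 * Example module load (illustrative only; these parameter values are
 * hypothetical, not recommendations):
 *
 *   modprobe ipr max_speed=2 log_level=2 fastfail=1
 */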

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, 1,
	"9073: Invalid multi-adapter configuration"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, 1,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, 1,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, 1,
	"9071: Link operational transition"},
	{0x066B8100, 0, 1,
	"9072: Link not operational transition"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

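	/* Note: free_q is assumed to be non-empty here; the driver's
	 * command accounting is expected to keep a free entry available
	 * whenever this is called. */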
	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
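	/* Read back to flush the posted MMIO writes to the adapter */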
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

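	/* Keep data parity error recovery and relaxed ordering enabled
	 * in the saved value that will be written back on restore */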
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

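	/* Make sure the IOARCB is written out to memory before the MMIO
	 * write below hands the command to the adapter */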
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

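	/* Drop the host lock while sleeping so the interrupt handler
	 * can run and complete the command */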
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

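		/* As in ipr_do_req(): order the IOARCB writes ahead of
		 * the doorbell write that hands the HCAM to the adapter */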
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

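		/* The first ten array members live in array_member; the
		 * remaining entries continue in array_member2 */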
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

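	/* Dump four 32-bit words per line, prefixed with the byte offset
	 * of the first word on that line */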
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If a host reset hit us while we were already resetting the
	   adapter and that reset failed, the IOA is dead; report failure. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

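		/* Convert the table's MB/s limit to 100KHz units for this
		 * bus width (e.g. 160 MB/s on a 16-bit bus -> 800 = 80 MHz) */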
		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

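		/* Exponential backoff: double the polling interval each pass */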
1628 delay += delay;
1629 }
1630 return -EIO;
1631}
1632
1633/**
1634 * ipr_get_ldump_data_section - Dump IOA memory
1635 * @ioa_cfg: ioa config struct
1636 * @start_addr: adapter address to dump
1637 * @dest: destination kernel buffer
1638 * @length_in_words: length to dump in 4 byte words
1639 *
1640 * Return value:
1641 * 0 on success / -EIO on failure
1642 **/
1643static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1644 u32 start_addr,
1645 __be32 *dest, u32 length_in_words)
1646{
1647 volatile u32 temp_pcii_reg;
1648 int i, delay = 0;
1649
1650 /* Write IOA interrupt reg starting LDUMP state */
1651 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1652 ioa_cfg->regs.set_uproc_interrupt_reg);
1653
1654 /* Wait for IO debug acknowledge */
1655 if (ipr_wait_iodbg_ack(ioa_cfg,
1656 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1657 dev_err(&ioa_cfg->pdev->dev,
1658 "IOA dump long data transfer timeout\n");
1659 return -EIO;
1660 }
1661
1662 /* Signal LDUMP interlocked - clear IO debug ack */
1663 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1664 ioa_cfg->regs.clr_interrupt_reg);
1665
1666 /* Write Mailbox with starting address */
1667 writel(start_addr, ioa_cfg->ioa_mailbox);
1668
1669 /* Signal address valid - clear IOA Reset alert */
1670 writel(IPR_UPROCI_RESET_ALERT,
1671 ioa_cfg->regs.clr_uproc_interrupt_reg);
1672
1673 for (i = 0; i < length_in_words; i++) {
1674 /* Wait for IO debug acknowledge */
1675 if (ipr_wait_iodbg_ack(ioa_cfg,
1676 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1677 dev_err(&ioa_cfg->pdev->dev,
1678 "IOA dump short data transfer timeout\n");
1679 return -EIO;
1680 }
1681
1682 /* Read data from mailbox and increment destination pointer */
1683 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1684 dest++;
1685
1686 /* For all but the last word of data, signal data received */
1687 if (i < (length_in_words - 1)) {
1688 /* Signal dump data received - Clear IO debug Ack */
1689 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1690 ioa_cfg->regs.clr_interrupt_reg);
1691 }
1692 }
1693
1694 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1695 writel(IPR_UPROCI_RESET_ALERT,
1696 ioa_cfg->regs.set_uproc_interrupt_reg);
1697
1698 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1699 ioa_cfg->regs.clr_uproc_interrupt_reg);
1700
1701 /* Signal dump data received - Clear IO debug Ack */
1702 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1703 ioa_cfg->regs.clr_interrupt_reg);
1704
1705 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1706 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1707 temp_pcii_reg =
1708 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1709
1710 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1711 return 0;
1712
1713 udelay(10);
1714 delay += 10;
1715 }
1716
1717 return 0;
1718}
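/*
 * Summary of the LDUMP handshake implemented above: raise RESET_ALERT
 * and IO_DEBUG_ALERT, wait for the IOA to acknowledge, write the start
 * address to the mailbox, then read one 32-bit word per acknowledge
 * cycle, clearing IPR_PCII_IO_DEBUG_ACKNOWLEDGE between words to
 * request the next one. The final RESET_ALERT set/clear sequence
 * signals the IOA that the block transfer is complete.
 */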
1719
1720#ifdef CONFIG_SCSI_IPR_DUMP
1721/**
1722 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1723 * @ioa_cfg: ioa config struct
1724 * @pci_address: adapter address
1725 * @length: length of data to copy
1726 *
1727 * Copy data from PCI adapter to kernel buffer.
1728 * Note: length MUST be a 4 byte multiple
1729 * Return value:
1730 * 0 on success / other on failure
1731 **/
1732static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1733 unsigned long pci_address, u32 length)
1734{
1735 int bytes_copied = 0;
1736 int cur_len, rc, rem_len, rem_page_len;
1737 __be32 *page;
1738 unsigned long lock_flags = 0;
1739 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1740
1741 while (bytes_copied < length &&
1742 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1743 if (ioa_dump->page_offset >= PAGE_SIZE ||
1744 ioa_dump->page_offset == 0) {
1745 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1746
1747 if (!page) {
1748 ipr_trace;
1749 return bytes_copied;
1750 }
1751
1752 ioa_dump->page_offset = 0;
1753 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1754 ioa_dump->next_page_index++;
1755 } else
1756 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1757
1758 rem_len = length - bytes_copied;
1759 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1760 cur_len = min(rem_len, rem_page_len);
1761
1762 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1763 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1764 rc = -EIO;
1765 } else {
1766 rc = ipr_get_ldump_data_section(ioa_cfg,
1767 pci_address + bytes_copied,
1768 &page[ioa_dump->page_offset / 4],
1769 (cur_len / sizeof(u32)));
1770 }
1771 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1772
1773 if (!rc) {
1774 ioa_dump->page_offset += cur_len;
1775 bytes_copied += cur_len;
1776 } else {
1777 ipr_trace;
1778 break;
1779 }
1780 schedule();
1781 }
1782
1783 return bytes_copied;
1784}
1785
1786/**
1787 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1788 * @hdr: dump entry header struct
1789 *
1790 * Return value:
1791 * nothing
1792 **/
1793static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1794{
1795 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1796 hdr->num_elems = 1;
1797 hdr->offset = sizeof(*hdr);
1798 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1799}
1800
1801/**
1802 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1803 * @ioa_cfg: ioa config struct
1804 * @driver_dump: driver dump struct
1805 *
1806 * Return value:
1807 * nothing
1808 **/
1809static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1810 struct ipr_driver_dump *driver_dump)
1811{
1812 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1813
1814 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1815 driver_dump->ioa_type_entry.hdr.len =
1816 sizeof(struct ipr_dump_ioa_type_entry) -
1817 sizeof(struct ipr_dump_entry_header);
1818 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1819 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1820 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1821 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1822 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1823 ucode_vpd->minor_release[1];
1824 driver_dump->hdr.num_entries++;
1825}
1826
1827/**
1828 * ipr_dump_version_data - Fill in the driver version in the dump.
1829 * @ioa_cfg: ioa config struct
1830 * @driver_dump: driver dump struct
1831 *
1832 * Return value:
1833 * nothing
1834 **/
1835static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1836 struct ipr_driver_dump *driver_dump)
1837{
1838 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1839 driver_dump->version_entry.hdr.len =
1840 sizeof(struct ipr_dump_version_entry) -
1841 sizeof(struct ipr_dump_entry_header);
1842 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1843 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1844 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1845 driver_dump->hdr.num_entries++;
1846}
1847
1848/**
1849 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1850 * @ioa_cfg: ioa config struct
1851 * @driver_dump: driver dump struct
1852 *
1853 * Return value:
1854 * nothing
1855 **/
1856static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1857 struct ipr_driver_dump *driver_dump)
1858{
1859 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1860 driver_dump->trace_entry.hdr.len =
1861 sizeof(struct ipr_dump_trace_entry) -
1862 sizeof(struct ipr_dump_entry_header);
1863 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1864 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1865 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1866 driver_dump->hdr.num_entries++;
1867}
1868
1869/**
1870 * ipr_dump_location_data - Fill in the IOA location in the dump.
1871 * @ioa_cfg: ioa config struct
1872 * @driver_dump: driver dump struct
1873 *
1874 * Return value:
1875 * nothing
1876 **/
1877static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1878 struct ipr_driver_dump *driver_dump)
1879{
1880 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1881 driver_dump->location_entry.hdr.len =
1882 sizeof(struct ipr_dump_location_entry) -
1883 sizeof(struct ipr_dump_entry_header);
1884 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1885 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1886 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1887 driver_dump->hdr.num_entries++;
1888}
1889
1890/**
1891 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1892 * @ioa_cfg: ioa config struct
1893 * @dump: dump struct
1894 *
1895 * Return value:
1896 * nothing
1897 **/
1898static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1899{
1900 unsigned long start_addr, sdt_word;
1901 unsigned long lock_flags = 0;
1902 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1903 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1904 u32 num_entries, start_off, end_off;
1905 u32 bytes_to_copy, bytes_copied, rc;
1906 struct ipr_sdt *sdt;
1907 int i;
1908
1909 ENTER;
1910
1911 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1912
1913 if (ioa_cfg->sdt_state != GET_DUMP) {
1914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1915 return;
1916 }
1917
1918 start_addr = readl(ioa_cfg->ioa_mailbox);
1919
1920 if (!ipr_sdt_is_fmt2(start_addr)) {
1921 dev_err(&ioa_cfg->pdev->dev,
1922 "Invalid dump table format: %lx\n", start_addr);
1923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1924 return;
1925 }
1926
1927 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1928
1929 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1930
1931 /* Initialize the overall dump header */
1932 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1933 driver_dump->hdr.num_entries = 1;
1934 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1935 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1936 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1937 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1938
1939 ipr_dump_version_data(ioa_cfg, driver_dump);
1940 ipr_dump_location_data(ioa_cfg, driver_dump);
1941 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1942 ipr_dump_trace_data(ioa_cfg, driver_dump);
1943
1944 /* Update dump_header */
1945 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1946
1947 /* IOA Dump entry */
1948 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1949 ioa_dump->format = IPR_SDT_FMT2;
1950 ioa_dump->hdr.len = 0;
1951 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1952 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1953
1954 /* First entries in sdt are actually a list of dump addresses and
1955 lengths to gather the real dump data. sdt represents the pointer
1956 to the ioa generated dump table. Dump data will be extracted based
1957 on entries in this table */
1958 sdt = &ioa_dump->sdt;
1959
1960 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1961 sizeof(struct ipr_sdt) / sizeof(__be32));
1962
1963 /* Make sure the Smart Dump table is ready to use and the first entry is valid */
1964 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1965 dev_err(&ioa_cfg->pdev->dev,
1966 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1967 rc, be32_to_cpu(sdt->hdr.state));
1968 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1969 ioa_cfg->sdt_state = DUMP_OBTAINED;
1970 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1971 return;
1972 }
1973
1974 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1975
1976 if (num_entries > IPR_NUM_SDT_ENTRIES)
1977 num_entries = IPR_NUM_SDT_ENTRIES;
1978
1979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1980
1981 for (i = 0; i < num_entries; i++) {
1982 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1983 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1984 break;
1985 }
1986
1987 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1988 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1989 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1990 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1991
1992 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1993 bytes_to_copy = end_off - start_off;
1994 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1995 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1996 continue;
1997 }
1998
1999 /* Copy data from adapter to driver buffers */
2000 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2001 bytes_to_copy);
2002
2003 ioa_dump->hdr.len += bytes_copied;
2004
2005 if (bytes_copied != bytes_to_copy) {
2006 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2007 break;
2008 }
2009 }
2010 }
2011 }
2012
2013 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2014
2015 /* Update dump_header */
2016 driver_dump->hdr.len += ioa_dump->hdr.len;
2017 wmb();
2018 ioa_cfg->sdt_state = DUMP_OBTAINED;
2019 LEAVE;
2020}
2021
2022#else
2023#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2024#endif
2025
2026/**
2027 * ipr_release_dump - Free adapter dump memory
2028 * @kref: kref struct
2029 *
2030 * Return value:
2031 * nothing
2032 **/
2033static void ipr_release_dump(struct kref *kref)
2034{
2035 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2036 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2037 unsigned long lock_flags = 0;
2038 int i;
2039
2040 ENTER;
2041 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2042 ioa_cfg->dump = NULL;
2043 ioa_cfg->sdt_state = INACTIVE;
2044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2045
2046 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2047 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2048
2049 kfree(dump);
2050 LEAVE;
2051}
2052
2053/**
2054 * ipr_worker_thread - Worker thread
2055 * @data: ioa config struct
2056 *
2057 * Called at task level from a work thread. This function takes care
2058 * of adding and removing devices from the mid-layer as configuration
2059 * changes are detected by the adapter.
2060 *
2061 * Return value:
2062 * nothing
2063 **/
2064static void ipr_worker_thread(void *data)
2065{
2066 unsigned long lock_flags;
2067 struct ipr_resource_entry *res;
2068 struct scsi_device *sdev;
2069 struct ipr_dump *dump;
2070 struct ipr_ioa_cfg *ioa_cfg = data;
2071 u8 bus, target, lun;
2072 int did_work;
2073
2074 ENTER;
2075 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2076
2077 if (ioa_cfg->sdt_state == GET_DUMP) {
2078 dump = ioa_cfg->dump;
2079 if (!dump) {
2080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2081 return;
2082 }
2083 kref_get(&dump->kref);
2084 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2085 ipr_get_ioa_dump(ioa_cfg, dump);
2086 kref_put(&dump->kref, ipr_release_dump);
2087
2088 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2089 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2090 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2092 return;
2093 }
2094
2095restart:
2096 do {
2097 did_work = 0;
2098 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2099 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2100 return;
2101 }
2102
2103 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2104 if (res->del_from_ml && res->sdev) {
2105 did_work = 1;
2106 sdev = res->sdev;
2107 if (!scsi_device_get(sdev)) {
2108 res->sdev = NULL;
2109 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2110 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2111 scsi_remove_device(sdev);
2112 scsi_device_put(sdev);
2113 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2114 }
2115 break;
2116 }
2117 }
2118 } while(did_work);
2119
2120 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2121 if (res->add_to_ml) {
2122 bus = res->cfgte.res_addr.bus;
2123 target = res->cfgte.res_addr.target;
2124 lun = res->cfgte.res_addr.lun;
2125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2126 scsi_add_device(ioa_cfg->host, bus, target, lun);
2127 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2128 goto restart;
2129 }
2130 }
2131
2132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2133 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
2134 LEAVE;
2135}
2136
2137#ifdef CONFIG_SCSI_IPR_TRACE
2138/**
2139 * ipr_read_trace - Dump the adapter trace
2140 * @kobj: kobject struct
2141 * @buf: buffer
2142 * @off: offset
2143 * @count: buffer size
2144 *
2145 * Return value:
2146 * number of bytes printed to buffer
2147 **/
2148static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2149 loff_t off, size_t count)
2150{
2151 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2152 struct Scsi_Host *shost = class_to_shost(cdev);
2153 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2154 unsigned long lock_flags = 0;
2155 int size = IPR_TRACE_SIZE;
2156 char *src = (char *)ioa_cfg->trace;
2157
2158 if (off > size)
2159 return 0;
2160 if (off + count > size) {
2161 size -= off;
2162 count = size;
2163 }
2164
2165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2166 memcpy(buf, &src[off], count);
2167 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2168 return count;
2169}
2170
2171static struct bin_attribute ipr_trace_attr = {
2172 .attr = {
2173 .name = "trace",
2174 .mode = S_IRUGO,
2175 },
2176 .size = 0,
2177 .read = ipr_read_trace,
2178};
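/*
 * The trace buffer is exposed as a read-only binary sysfs attribute.
 * Example read from user space (hypothetical host number; the actual
 * path depends on the system):
 *
 *	dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace bs=4k
 */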
2179#endif
2180
2181static const struct {
2182 enum ipr_cache_state state;
2183 char *name;
2184} cache_state [] = {
2185 { CACHE_NONE, "none" },
2186 { CACHE_DISABLED, "disabled" },
2187 { CACHE_ENABLED, "enabled" }
2188};
2189
2190/**
2191 * ipr_show_write_caching - Show the write caching attribute
2192 * @class_dev: class device struct
2193 * @buf: buffer
2194 *
2195 * Return value:
2196 * number of bytes printed to buffer
2197 **/
2198static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2199{
2200 struct Scsi_Host *shost = class_to_shost(class_dev);
2201 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2202 unsigned long lock_flags = 0;
2203 int i, len = 0;
2204
2205 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2206 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2207 if (cache_state[i].state == ioa_cfg->cache_state) {
2208 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2209 break;
2210 }
2211 }
2212 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2213 return len;
2214}
2215
2216
2217/**
2218 * ipr_store_write_caching - Enable/disable adapter write cache
2219 * @class_dev: class_device struct
2220 * @buf: buffer
2221 * @count: buffer size
2222 *
2223 * This function will enable/disable adapter write cache.
2224 *
2225 * Return value:
2226 * count on success / other on failure
2227 **/
2228static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2229 const char *buf, size_t count)
2230{
2231 struct Scsi_Host *shost = class_to_shost(class_dev);
2232 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2233 unsigned long lock_flags = 0;
2234 enum ipr_cache_state new_state = CACHE_INVALID;
2235 int i;
2236
2237 if (!capable(CAP_SYS_ADMIN))
2238 return -EACCES;
2239 if (ioa_cfg->cache_state == CACHE_NONE)
2240 return -EINVAL;
2241
2242 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2243 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2244 new_state = cache_state[i].state;
2245 break;
2246 }
2247 }
2248
2249 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2250 return -EINVAL;
2251
2252 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2253 if (ioa_cfg->cache_state == new_state) {
2254 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2255 return count;
2256 }
2257
2258 ioa_cfg->cache_state = new_state;
2259 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2260 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2261 if (!ioa_cfg->in_reset_reload)
2262 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2263 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2264 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2265
2266 return count;
2267}
2268
2269static struct class_device_attribute ipr_ioa_cache_attr = {
2270 .attr = {
2271 .name = "write_cache",
2272 .mode = S_IRUGO | S_IWUSR,
2273 },
2274 .show = ipr_show_write_caching,
2275 .store = ipr_store_write_caching
2276};
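/*
 * Example usage of the write_cache attribute from user space
 * (hypothetical host number):
 *
 *	cat /sys/class/scsi_host/host0/write_cache
 *	echo disabled > /sys/class/scsi_host/host0/write_cache
 *
 * A store that changes the state initiates a normal shutdown and
 * adapter reset and does not return until the reload completes.
 */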
2277
2278/**
2279 * ipr_show_fw_version - Show the firmware version
2280 * @class_dev: class device struct
2281 * @buf: buffer
2282 *
2283 * Return value:
2284 * number of bytes printed to buffer
2285 **/
2286static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2287{
2288 struct Scsi_Host *shost = class_to_shost(class_dev);
2289 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2290 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2291 unsigned long lock_flags = 0;
2292 int len;
2293
2294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2295 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2296 ucode_vpd->major_release, ucode_vpd->card_type,
2297 ucode_vpd->minor_release[0],
2298 ucode_vpd->minor_release[1]);
2299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2300 return len;
2301}
2302
2303static struct class_device_attribute ipr_fw_version_attr = {
2304 .attr = {
2305 .name = "fw_version",
2306 .mode = S_IRUGO,
2307 },
2308 .show = ipr_show_fw_version,
2309};
2310
2311/**
2312 * ipr_show_log_level - Show the adapter's error logging level
2313 * @class_dev: class device struct
2314 * @buf: buffer
2315 *
2316 * Return value:
2317 * number of bytes printed to buffer
2318 **/
2319static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2320{
2321 struct Scsi_Host *shost = class_to_shost(class_dev);
2322 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2323 unsigned long lock_flags = 0;
2324 int len;
2325
2326 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2327 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2329 return len;
2330}
2331
2332/**
2333 * ipr_store_log_level - Change the adapter's error logging level
2334 * @class_dev: class device struct
2335 * @buf: buffer
2336 *
2337 * Return value:
2338 * number of bytes consumed from buffer
2339 **/
2340static ssize_t ipr_store_log_level(struct class_device *class_dev,
2341 const char *buf, size_t count)
2342{
2343 struct Scsi_Host *shost = class_to_shost(class_dev);
2344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2345 unsigned long lock_flags = 0;
2346
2347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2348 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2350 return strlen(buf);
2351}
2352
2353static struct class_device_attribute ipr_log_level_attr = {
2354 .attr = {
2355 .name = "log_level",
2356 .mode = S_IRUGO | S_IWUSR,
2357 },
2358 .show = ipr_show_log_level,
2359 .store = ipr_store_log_level
2360};
2361
2362/**
2363 * ipr_store_diagnostics - IOA Diagnostics interface
2364 * @class_dev: class_device struct
2365 * @buf: buffer
2366 * @count: buffer size
2367 *
2368 * This function will reset the adapter and wait a reasonable
2369 * amount of time for any errors that the adapter might log.
2370 *
2371 * Return value:
2372 * count on success / other on failure
2373 **/
2374static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2375 const char *buf, size_t count)
2376{
2377 struct Scsi_Host *shost = class_to_shost(class_dev);
2378 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2379 unsigned long lock_flags = 0;
2380 int rc = count;
2381
2382 if (!capable(CAP_SYS_ADMIN))
2383 return -EACCES;
2384
2385 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2387 ioa_cfg->errors_logged = 0;
2388 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2389
2390 if (ioa_cfg->in_reset_reload) {
2391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2392 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2393
2394 /* Wait for a second for any errors to be logged */
2395 msleep(1000);
2396 } else {
2397 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2398 return -EIO;
2399 }
2400
2401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2402 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2403 rc = -EIO;
2404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2405
2406 return rc;
2407}
2408
2409static struct class_device_attribute ipr_diagnostics_attr = {
2410 .attr = {
2411 .name = "run_diagnostics",
2412 .mode = S_IWUSR,
2413 },
2414 .store = ipr_store_diagnostics
2415};
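/*
 * Example usage (hypothetical host number):
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * The write succeeds (returning the byte count) only if the adapter
 * reset completed and no errors were logged within the one second
 * grace period; otherwise -EIO is returned.
 */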
2416
2417/**
2418 * ipr_show_adapter_state - Show the adapter's state
2419 * @class_dev: class device struct
2420 * @buf: buffer
2421 *
2422 * Return value:
2423 * number of bytes printed to buffer
2424 **/
2425static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2426{
2427 struct Scsi_Host *shost = class_to_shost(class_dev);
2428 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2429 unsigned long lock_flags = 0;
2430 int len;
2431
2432 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2433 if (ioa_cfg->ioa_is_dead)
2434 len = snprintf(buf, PAGE_SIZE, "offline\n");
2435 else
2436 len = snprintf(buf, PAGE_SIZE, "online\n");
2437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2438 return len;
2439}
2440
2441/**
2442 * ipr_store_adapter_state - Change adapter state
2443 * @class_dev: class_device struct
2444 * @buf: buffer
2445 * @count: buffer size
2446 *
2447 * This function will change the adapter's state.
2448 *
2449 * Return value:
2450 * count on success / other on failure
2451 **/
2452static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2453 const char *buf, size_t count)
2454{
2455 struct Scsi_Host *shost = class_to_shost(class_dev);
2456 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2457 unsigned long lock_flags;
2458 int result = count;
2459
2460 if (!capable(CAP_SYS_ADMIN))
2461 return -EACCES;
2462
2463 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2464 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2465 ioa_cfg->ioa_is_dead = 0;
2466 ioa_cfg->reset_retries = 0;
2467 ioa_cfg->in_ioa_bringdown = 0;
2468 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2469 }
2470 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2471 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2472
2473 return result;
2474}
2475
2476static struct class_device_attribute ipr_ioa_state_attr = {
2477 .attr = {
2478 .name = "state",
2479 .mode = S_IRUGO | S_IWUSR,
2480 },
2481 .show = ipr_show_adapter_state,
2482 .store = ipr_store_adapter_state
2483};
2484
2485/**
2486 * ipr_store_reset_adapter - Reset the adapter
2487 * @class_dev: class_device struct
2488 * @buf: buffer
2489 * @count: buffer size
2490 *
2491 * This function will reset the adapter.
2492 *
2493 * Return value:
2494 * count on success / other on failure
2495 **/
2496static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2497 const char *buf, size_t count)
2498{
2499 struct Scsi_Host *shost = class_to_shost(class_dev);
2500 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2501 unsigned long lock_flags;
2502 int result = count;
2503
2504 if (!capable(CAP_SYS_ADMIN))
2505 return -EACCES;
2506
2507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2508 if (!ioa_cfg->in_reset_reload)
2509 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2510 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2511 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2512
2513 return result;
2514}
2515
2516static struct class_device_attribute ipr_ioa_reset_attr = {
2517 .attr = {
2518 .name = "reset_host",
2519 .mode = S_IWUSR,
2520 },
2521 .store = ipr_store_reset_adapter
2522};
2523
2524/**
2525 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2526 * @buf_len: buffer length
2527 *
2528 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2529 * list to use for microcode download
2530 *
2531 * Return value:
2532 * pointer to sglist / NULL on failure
2533 **/
2534static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2535{
2536 int sg_size, order, bsize_elem, num_elem, i, j;
2537 struct ipr_sglist *sglist;
2538 struct scatterlist *scatterlist;
2539 struct page *page;
2540
2541 /* Get the minimum size per scatter/gather element */
2542 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2543
2544 /* Get the actual size per element */
2545 order = get_order(sg_size);
2546
2547 /* Determine the actual number of bytes per element */
2548 bsize_elem = PAGE_SIZE * (1 << order);
2549
2550 /* Determine the actual number of sg entries needed */
2551 if (buf_len % bsize_elem)
2552 num_elem = (buf_len / bsize_elem) + 1;
2553 else
2554 num_elem = buf_len / bsize_elem;
2555
2556 /* Allocate a scatter/gather list for the DMA */
2557 sglist = kzalloc(sizeof(struct ipr_sglist) +
2558 (sizeof(struct scatterlist) * (num_elem - 1)),
2559 GFP_KERNEL);
2560
2561 if (sglist == NULL) {
2562 ipr_trace;
2563 return NULL;
2564 }
2565
2566 scatterlist = sglist->scatterlist;
2567
2568 sglist->order = order;
2569 sglist->num_sg = num_elem;
2570
2571 /* Allocate a bunch of sg elements */
2572 for (i = 0; i < num_elem; i++) {
2573 page = alloc_pages(GFP_KERNEL, order);
2574 if (!page) {
2575 ipr_trace;
2576
2577 /* Free up what we already allocated */
2578 for (j = i - 1; j >= 0; j--)
2579 __free_pages(scatterlist[j].page, order);
2580 kfree(sglist);
2581 return NULL;
2582 }
2583
2584 scatterlist[i].page = page;
2585 }
2586
2587 return sglist;
2588}
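/*
 * Worked sizing example for the allocator above, assuming 4K pages
 * and IPR_MAX_SGLIST == 64 (see ipr.h for the actual limit): for a
 * 1MB image, sg_size = 1MB / 63 is about 16.6KB, get_order() rounds
 * that up to order 3, so bsize_elem = 32KB and the image needs
 * num_elem = 1MB / 32KB = 32 scatter/gather elements.
 */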
2589
2590/**
2591 * ipr_free_ucode_buffer - Frees a microcode download buffer
2592 * @p_dnld: scatter/gather list pointer
2593 *
2594 * Free a DMA'able ucode download buffer previously allocated with
2595 * ipr_alloc_ucode_buffer
2596 *
2597 * Return value:
2598 * nothing
2599 **/
2600static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2601{
2602 int i;
2603
2604 for (i = 0; i < sglist->num_sg; i++)
2605 __free_pages(sglist->scatterlist[i].page, sglist->order);
2606
2607 kfree(sglist);
2608}
2609
2610/**
2611 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2612 * @sglist: scatter/gather list pointer
2613 * @buffer: buffer pointer
2614 * @len: buffer length
2615 *
2616 * Copy a microcode image from a user buffer into a buffer allocated by
2617 * ipr_alloc_ucode_buffer
2618 *
2619 * Return value:
2620 * 0 on success / other on failure
2621 **/
2622static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2623 u8 *buffer, u32 len)
2624{
2625 int bsize_elem, i, result = 0;
2626 struct scatterlist *scatterlist;
2627 void *kaddr;
2628
2629 /* Determine the actual number of bytes per element */
2630 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2631
2632 scatterlist = sglist->scatterlist;
2633
2634 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2635 kaddr = kmap(scatterlist[i].page);
2636 memcpy(kaddr, buffer, bsize_elem);
2637 kunmap(scatterlist[i].page);
2638
2639 scatterlist[i].length = bsize_elem;
2640
2641 if (result != 0) {
2642 ipr_trace;
2643 return result;
2644 }
2645 }
2646
2647 if (len % bsize_elem) {
2648 kaddr = kmap(scatterlist[i].page);
2649 memcpy(kaddr, buffer, len % bsize_elem);
2650 kunmap(scatterlist[i].page);
2651
2652 scatterlist[i].length = len % bsize_elem;
2653 }
2654
2655 sglist->buffer_len = len;
2656 return result;
2657}
2658
2659/**
2660 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2661 * @ipr_cmd: ipr command struct
2662 * @sglist: scatter/gather list
2663 *
2664 * Builds a microcode download IOA data list (IOADL).
2665 *
2666 **/
2667static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2668 struct ipr_sglist *sglist)
2669{
2670 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2671 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2672 struct scatterlist *scatterlist = sglist->scatterlist;
2673 int i;
2674
2675 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2676 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2677 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2678 ioarcb->write_ioadl_len =
2679 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2680
2681 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2682 ioadl[i].flags_and_data_len =
2683 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2684 ioadl[i].address =
2685 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2686 }
2687
2688 ioadl[i-1].flags_and_data_len |=
2689 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2690}
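/*
 * Each IOADL descriptor built above packs the write flag and byte
 * count into one big-endian word followed by the DMA address. For a
 * hypothetical three-element scatter/gather list the result is:
 *
 *	ioadl[0] = { WRITE | len0, addr0 }
 *	ioadl[1] = { WRITE | len1, addr1 }
 *	ioadl[2] = { WRITE | LAST | len2, addr2 }
 */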
2691
2692/**
2693 * ipr_update_ioa_ucode - Update IOA's microcode
2694 * @ioa_cfg: ioa config struct
2695 * @sglist: scatter/gather list
2696 *
2697 * Initiate an adapter reset to update the IOA's microcode
2698 *
2699 * Return value:
2700 * 0 on success / -EIO on failure
2701 **/
2702static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2703 struct ipr_sglist *sglist)
2704{
2705 unsigned long lock_flags;
2706
2707 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2708
2709 if (ioa_cfg->ucode_sglist) {
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2711 dev_err(&ioa_cfg->pdev->dev,
2712 "Microcode download already in progress\n");
2713 return -EIO;
2714 }
2715
2716 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2717 sglist->num_sg, DMA_TO_DEVICE);
2718
2719 if (!sglist->num_dma_sg) {
2720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721 dev_err(&ioa_cfg->pdev->dev,
2722 "Failed to map microcode download buffer!\n");
2723 return -EIO;
2724 }
2725
2726 ioa_cfg->ucode_sglist = sglist;
2727 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2728 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2729 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2730
2731 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2732 ioa_cfg->ucode_sglist = NULL;
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2734 return 0;
2735}
2736
2737/**
2738 * ipr_store_update_fw - Update the firmware on the adapter
2739 * @class_dev: class_device struct
2740 * @buf: buffer
2741 * @count: buffer size
2742 *
2743 * This function will update the firmware on the adapter.
2744 *
2745 * Return value:
2746 * count on success / other on failure
2747 **/
2748static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2749 const char *buf, size_t count)
2750{
2751 struct Scsi_Host *shost = class_to_shost(class_dev);
2752 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2753 struct ipr_ucode_image_header *image_hdr;
2754 const struct firmware *fw_entry;
2755 struct ipr_sglist *sglist;
2756 char fname[100];
2757 char *src;
2758 int len, result, dnld_size;
2759
2760 if (!capable(CAP_SYS_ADMIN))
2761 return -EACCES;
2762
2763 len = snprintf(fname, 99, "%s", buf);
2764 fname[len-1] = '\0';
2765
2766 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2767 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2768 return -EIO;
2769 }
2770
2771 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2772
2773 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2774 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2775 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2776 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2777 release_firmware(fw_entry);
2778 return -EINVAL;
2779 }
2780
2781 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2782 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2783 sglist = ipr_alloc_ucode_buffer(dnld_size);
2784
2785 if (!sglist) {
2786 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2787 release_firmware(fw_entry);
2788 return -ENOMEM;
2789 }
2790
2791 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2792
2793 if (result) {
2794 dev_err(&ioa_cfg->pdev->dev,
2795 "Microcode buffer copy to DMA buffer failed\n");
2796 goto out;
2797 }
2798
2799 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2800
2801 if (!result)
2802 result = count;
2803out:
2804 ipr_free_ucode_buffer(sglist);
2805 release_firmware(fw_entry);
2806 return result;
2807}
2808
2809static struct class_device_attribute ipr_update_fw_attr = {
2810 .attr = {
2811 .name = "update_fw",
2812 .mode = S_IWUSR,
2813 },
2814 .store = ipr_store_update_fw
2815};
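/*
 * Example firmware update from user space (hypothetical host number
 * and image name). The image is fetched with request_firmware(), so
 * it must be visible to the firmware loader, typically under
 * /lib/firmware:
 *
 *	echo ipr-ucode.img > /sys/class/scsi_host/host0/update_fw
 */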
2816
2817static struct class_device_attribute *ipr_ioa_attrs[] = {
2818 &ipr_fw_version_attr,
2819 &ipr_log_level_attr,
2820 &ipr_diagnostics_attr,
2821 &ipr_ioa_state_attr,
2822 &ipr_ioa_reset_attr,
2823 &ipr_update_fw_attr,
2824 &ipr_ioa_cache_attr,
2825 NULL,
2826};
2827
2828#ifdef CONFIG_SCSI_IPR_DUMP
2829/**
2830 * ipr_read_dump - Dump the adapter
2831 * @kobj: kobject struct
2832 * @buf: buffer
2833 * @off: offset
2834 * @count: buffer size
2835 *
2836 * Return value:
2837 * number of bytes printed to buffer
2838 **/
2839static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2840 loff_t off, size_t count)
2841{
2842 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2843 struct Scsi_Host *shost = class_to_shost(cdev);
2844 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2845 struct ipr_dump *dump;
2846 unsigned long lock_flags = 0;
2847 char *src;
2848 int len;
2849 size_t rc = count;
2850
2851 if (!capable(CAP_SYS_ADMIN))
2852 return -EACCES;
2853
2854 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2855 dump = ioa_cfg->dump;
2856
2857 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2859 return 0;
2860 }
2861 kref_get(&dump->kref);
2862 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2863
2864 if (off > dump->driver_dump.hdr.len) {
2865 kref_put(&dump->kref, ipr_release_dump);
2866 return 0;
2867 }
2868
2869 if (off + count > dump->driver_dump.hdr.len) {
2870 count = dump->driver_dump.hdr.len - off;
2871 rc = count;
2872 }
2873
2874 if (count && off < sizeof(dump->driver_dump)) {
2875 if (off + count > sizeof(dump->driver_dump))
2876 len = sizeof(dump->driver_dump) - off;
2877 else
2878 len = count;
2879 src = (u8 *)&dump->driver_dump + off;
2880 memcpy(buf, src, len);
2881 buf += len;
2882 off += len;
2883 count -= len;
2884 }
2885
2886 off -= sizeof(dump->driver_dump);
2887
2888 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2889 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2890 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2891 else
2892 len = count;
2893 src = (u8 *)&dump->ioa_dump + off;
2894 memcpy(buf, src, len);
2895 buf += len;
2896 off += len;
2897 count -= len;
2898 }
2899
2900 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2901
2902 while (count) {
2903 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2904 len = PAGE_ALIGN(off) - off;
2905 else
2906 len = count;
2907 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2908 src += off & ~PAGE_MASK;
2909 memcpy(buf, src, len);
2910 buf += len;
2911 off += len;
2912 count -= len;
2913 }
2914
2915 kref_put(&dump->kref, ipr_release_dump);
2916 return rc;
2917}
2918
2919/**
2920 * ipr_alloc_dump - Prepare for adapter dump
2921 * @ioa_cfg: ioa config struct
2922 *
2923 * Return value:
2924 * 0 on success / other on failure
2925 **/
2926static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2927{
2928 struct ipr_dump *dump;
2929 unsigned long lock_flags = 0;
2930
2931 ENTER;
2932 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2933
2934 if (!dump) {
2935 ipr_err("Dump memory allocation failed\n");
2936 return -ENOMEM;
2937 }
2938
2939 kref_init(&dump->kref);
2940 dump->ioa_cfg = ioa_cfg;
2941
2942 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2943
2944 if (INACTIVE != ioa_cfg->sdt_state) {
2945 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2946 kfree(dump);
2947 return 0;
2948 }
2949
2950 ioa_cfg->dump = dump;
2951 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2952 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2953 ioa_cfg->dump_taken = 1;
2954 schedule_work(&ioa_cfg->work_q);
2955 }
2956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2957
2958 LEAVE;
2959 return 0;
2960}
2961
2962/**
2963 * ipr_free_dump - Free adapter dump memory
2964 * @ioa_cfg: ioa config struct
2965 *
2966 * Return value:
2967 * 0 on success / other on failure
2968 **/
2969static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2970{
2971 struct ipr_dump *dump;
2972 unsigned long lock_flags = 0;
2973
2974 ENTER;
2975
2976 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2977 dump = ioa_cfg->dump;
2978 if (!dump) {
2979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2980 return 0;
2981 }
2982
2983 ioa_cfg->dump = NULL;
2984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2985
2986 kref_put(&dump->kref, ipr_release_dump);
2987
2988 LEAVE;
2989 return 0;
2990}
2991
2992/**
2993 * ipr_write_dump - Setup dump state of adapter
2994 * @kobj: kobject struct
2995 * @buf: buffer
2996 * @off: offset
2997 * @count: buffer size
2998 *
2999 * Return value:
3000 * count on success / other on failure
3001 **/
3002static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3003 loff_t off, size_t count)
3004{
3005 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3006 struct Scsi_Host *shost = class_to_shost(cdev);
3007 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3008 int rc;
3009
3010 if (!capable(CAP_SYS_ADMIN))
3011 return -EACCES;
3012
3013 if (buf[0] == '1')
3014 rc = ipr_alloc_dump(ioa_cfg);
3015 else if (buf[0] == '0')
3016 rc = ipr_free_dump(ioa_cfg);
3017 else
3018 return -EINVAL;
3019
3020 if (rc)
3021 return rc;
3022 else
3023 return count;
3024}
3025
3026static struct bin_attribute ipr_dump_attr = {
3027 .attr = {
3028 .name = "dump",
3029 .mode = S_IRUSR | S_IWUSR,
3030 },
3031 .size = 0,
3032 .read = ipr_read_dump,
3033 .write = ipr_write_dump
3034};
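/*
 * Example dump retrieval sequence (hypothetical host number):
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump	# arm the dump
 *	cat /sys/class/scsi_host/host0/dump > /tmp/ipr_dump
 *	echo 0 > /sys/class/scsi_host/host0/dump	# free dump memory
 *
 * Reads return data only after the dump state reaches DUMP_OBTAINED.
 */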
3035#else
3036static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3037#endif
3038
3039/**
3040 * ipr_change_queue_depth - Change the device's queue depth
3041 * @sdev: scsi device struct
3042 * @qdepth: depth to set
3043 *
3044 * Return value:
3045 * actual depth set
3046 **/
3047static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3048{
3049 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3050 return sdev->queue_depth;
3051}
3052
3053/**
3054 * ipr_change_queue_type - Change the device's queue type
3055 * @dsev: scsi device struct
3056 * @tag_type: type of tags to use
3057 *
3058 * Return value:
3059 * actual queue type set
3060 **/
3061static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3062{
3063 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3064 struct ipr_resource_entry *res;
3065 unsigned long lock_flags = 0;
3066
3067 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068 res = (struct ipr_resource_entry *)sdev->hostdata;
3069
3070 if (res) {
3071 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3072 /*
3073 * We don't bother quiescing the device here since the
3074 * adapter firmware does it for us.
3075 */
3076 scsi_set_tag_type(sdev, tag_type);
3077
3078 if (tag_type)
3079 scsi_activate_tcq(sdev, sdev->queue_depth);
3080 else
3081 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3082 } else
3083 tag_type = 0;
3084 } else
3085 tag_type = 0;
3086
3087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3088 return tag_type;
3089}
3090
3091/**
3092 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3093 * @dev: device struct
3094 * @buf: buffer
3095 *
3096 * Return value:
3097 * number of bytes printed to buffer
3098 **/
3099static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3100{
3101 struct scsi_device *sdev = to_scsi_device(dev);
3102 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3103 struct ipr_resource_entry *res;
3104 unsigned long lock_flags = 0;
3105 ssize_t len = -ENXIO;
3106
3107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3108 res = (struct ipr_resource_entry *)sdev->hostdata;
3109 if (res)
3110 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3111 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3112 return len;
3113}
3114
3115static struct device_attribute ipr_adapter_handle_attr = {
3116 .attr = {
3117 .name = "adapter_handle",
3118 .mode = S_IRUSR,
3119 },
3120 .show = ipr_show_adapter_handle
3121};
3122
3123static struct device_attribute *ipr_dev_attrs[] = {
3124 &ipr_adapter_handle_attr,
3125 NULL,
3126};
3127
3128/**
3129 * ipr_biosparam - Return the HSC mapping
3130 * @sdev: scsi device struct
3131 * @block_device: block device pointer
3132 * @capacity: capacity of the device
3133 * @parm: Array containing returned HSC values.
3134 *
3135 * This function generates the HSC parms that fdisk uses.
3136 * We want to make sure we return something that places partitions
3137 * on 4k boundaries for best performance with the IOA.
3138 *
3139 * Return value:
3140 * 0 on success
3141 **/
3142static int ipr_biosparam(struct scsi_device *sdev,
3143 struct block_device *block_device,
3144 sector_t capacity, int *parm)
3145{
3146 int heads, sectors;
3147 sector_t cylinders;
3148
3149 heads = 128;
3150 sectors = 32;
3151
3152 cylinders = capacity;
3153 sector_div(cylinders, (128 * 32));
3154
3155 /* return result */
3156 parm[0] = heads;
3157 parm[1] = sectors;
3158 parm[2] = cylinders;
3159
3160 return 0;
3161}
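/*
 * With the fixed 128-head, 32-sector geometry above, each cylinder is
 * 128 * 32 = 4096 sectors (2MB with 512-byte sectors), so cylinder
 * aligned partitions always start on a 4k boundary. For example, a
 * hypothetical disk of 71687372 sectors reports
 * 71687372 / 4096 = 17501 cylinders (truncated by sector_div).
 */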
3162
3163/**
3164 * ipr_slave_destroy - Unconfigure a SCSI device
3165 * @sdev: scsi device struct
3166 *
3167 * Return value:
3168 * nothing
3169 **/
3170static void ipr_slave_destroy(struct scsi_device *sdev)
3171{
3172 struct ipr_resource_entry *res;
3173 struct ipr_ioa_cfg *ioa_cfg;
3174 unsigned long lock_flags = 0;
3175
3176 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3177
3178 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3179 res = (struct ipr_resource_entry *) sdev->hostdata;
3180 if (res) {
3181 sdev->hostdata = NULL;
3182 res->sdev = NULL;
3183 }
3184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3185}
3186
3187/**
3188 * ipr_slave_configure - Configure a SCSI device
3189 * @sdev: scsi device struct
3190 *
3191 * This function configures the specified scsi device.
3192 *
3193 * Return value:
3194 * 0 on success
3195 **/
3196static int ipr_slave_configure(struct scsi_device *sdev)
3197{
3198 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3199 struct ipr_resource_entry *res;
3200 unsigned long lock_flags = 0;
3201
3202 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3203 res = sdev->hostdata;
3204 if (res) {
3205 if (ipr_is_af_dasd_device(res))
3206 sdev->type = TYPE_RAID;
3207 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3208 sdev->scsi_level = 4;
3209 sdev->no_uld_attach = 1;
3210 }
3211 if (ipr_is_vset_device(res)) {
3212 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3213 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3214 }
3215 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
3216 sdev->allow_restart = 1;
3217 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3218 }
3219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3220 return 0;
3221}
3222
3223/**
3224 * ipr_slave_alloc - Prepare for commands to a device.
3225 * @sdev: scsi device struct
3226 *
3227 * This function saves a pointer to the resource entry
3228 * in the scsi device struct if the device exists. We
3229 * can then use this pointer in ipr_queuecommand when
3230 * handling new commands.
3231 *
3232 * Return value:
3233 * 0 on success / -ENXIO if device does not exist
3234 **/
3235static int ipr_slave_alloc(struct scsi_device *sdev)
3236{
3237 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3238 struct ipr_resource_entry *res;
3239 unsigned long lock_flags;
3240 int rc = -ENXIO;
3241
3242 sdev->hostdata = NULL;
3243
3244 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3245
3246 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3247 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3248 (res->cfgte.res_addr.target == sdev->id) &&
3249 (res->cfgte.res_addr.lun == sdev->lun)) {
3250 res->sdev = sdev;
3251 res->add_to_ml = 0;
3252 res->in_erp = 0;
3253 sdev->hostdata = res;
3254 if (!ipr_is_naca_model(res))
3255 res->needs_sync_complete = 1;
3256 rc = 0;
3257 break;
3258 }
3259 }
3260
3261 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3262
3263 return rc;
3264}
3265
3266/**
3267 * ipr_eh_host_reset - Reset the host adapter
3268 * @scsi_cmd: scsi command struct
3269 *
3270 * Return value:
3271 * SUCCESS / FAILED
3272 **/
3273static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3274{
3275 struct ipr_ioa_cfg *ioa_cfg;
3276 int rc;
3277
3278 ENTER;
3279 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3280
3281 dev_err(&ioa_cfg->pdev->dev,
3282 "Adapter being reset as a result of error recovery.\n");
3283
3284 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3285 ioa_cfg->sdt_state = GET_DUMP;
3286
3287 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3288
3289 LEAVE;
3290 return rc;
3291}
3292
3293static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3294{
3295 int rc;
3296
3297 spin_lock_irq(cmd->device->host->host_lock);
3298 rc = __ipr_eh_host_reset(cmd);
3299 spin_unlock_irq(cmd->device->host->host_lock);
3300
3301 return rc;
3302}
3303
3304/**
3305 * ipr_eh_dev_reset - Reset the device
3306 * @scsi_cmd: scsi command struct
3307 *
3308 * This function issues a device reset to the affected device.
3309 * A LUN reset will be sent to the device first. If that does
3310 * not work, a target reset will be sent.
3311 *
3312 * Return value:
3313 * SUCCESS / FAILED
3314 **/
3315static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3316{
3317 struct ipr_cmnd *ipr_cmd;
3318 struct ipr_ioa_cfg *ioa_cfg;
3319 struct ipr_resource_entry *res;
3320 struct ipr_cmd_pkt *cmd_pkt;
3321 u32 ioasc;
3322
3323 ENTER;
3324 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3325 res = scsi_cmd->device->hostdata;
3326
3327 if (!res)
3328 return FAILED;
3329
3330 /*
3331 * If we are currently going through reset/reload, return failed. This will force the
3332 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3333 * reset to complete
3334 */
3335 if (ioa_cfg->in_reset_reload)
3336 return FAILED;
3337 if (ioa_cfg->ioa_is_dead)
3338 return FAILED;
3339
3340 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3341 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3342 if (ipr_cmd->scsi_cmd)
3343 ipr_cmd->done = ipr_scsi_eh_done;
3344 }
3345 }
3346
3347 res->resetting_device = 1;
3348
3349 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3350
3351 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3352 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3353 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3354 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3355
3356 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3357 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3358
3359 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3360
3361 res->resetting_device = 0;
3362
3363 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3364
3365 LEAVE;
3366 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3367}
3368
3369static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3370{
3371 int rc;
3372
3373 spin_lock_irq(cmd->device->host->host_lock);
3374 rc = __ipr_eh_dev_reset(cmd);
3375 spin_unlock_irq(cmd->device->host->host_lock);
3376
3377 return rc;
3378}
3379
3380/**
3381 * ipr_bus_reset_done - Op done function for bus reset.
3382 * @ipr_cmd: ipr command struct
3383 *
3384 * This function is the op done function for a bus reset
3385 *
3386 * Return value:
3387 * none
3388 **/
3389static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3390{
3391 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3392 struct ipr_resource_entry *res;
3393
3394 ENTER;
3395 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3396 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3397 sizeof(res->cfgte.res_handle))) {
3398 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3399 break;
3400 }
3401 }
3402
3403 /*
3404 * If abort has not completed, indicate the reset has, else call the
3405 * abort's done function to wake the sleeping eh thread
3406 */
3407 if (ipr_cmd->sibling->sibling)
3408 ipr_cmd->sibling->sibling = NULL;
3409 else
3410 ipr_cmd->sibling->done(ipr_cmd->sibling);
3411
3412 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3413 LEAVE;
3414}
3415
3416/**
3417 * ipr_abort_timeout - An abort task has timed out
3418 * @ipr_cmd: ipr command struct
3419 *
3420 * This function handles when an abort task times out. If this
3421 * happens we issue a bus reset since we have resources tied
3422 * up that must be freed before returning to the midlayer.
3423 *
3424 * Return value:
3425 * none
3426 **/
3427static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3428{
3429 struct ipr_cmnd *reset_cmd;
3430 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3431 struct ipr_cmd_pkt *cmd_pkt;
3432 unsigned long lock_flags = 0;
3433
3434 ENTER;
3435 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3436 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3438 return;
3439 }
3440
3441 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3442 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3443 ipr_cmd->sibling = reset_cmd;
3444 reset_cmd->sibling = ipr_cmd;
3445 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3446 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3447 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3448 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3449 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3450
3451 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3452 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3453 LEAVE;
3454}
3455
3456/**
3457 * ipr_cancel_op - Cancel specified op
3458 * @scsi_cmd: scsi command struct
3459 *
3460 * This function cancels specified op.
3461 *
3462 * Return value:
3463 * SUCCESS / FAILED
3464 **/
3465static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3466{
3467 struct ipr_cmnd *ipr_cmd;
3468 struct ipr_ioa_cfg *ioa_cfg;
3469 struct ipr_resource_entry *res;
3470 struct ipr_cmd_pkt *cmd_pkt;
3471 u32 ioasc;
3472 int op_found = 0;
3473
3474 ENTER;
3475 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3476 res = scsi_cmd->device->hostdata;
3477
3478 /* If we are currently going through reset/reload, return failed.
3479 * This will force the mid-layer to call ipr_eh_host_reset,
3480 * which will then go to sleep and wait for the reset to complete
3481 */
3482 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3483 return FAILED;
3484 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3485 return FAILED;
3486
3487 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3488 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3489 ipr_cmd->done = ipr_scsi_eh_done;
3490 op_found = 1;
3491 break;
3492 }
3493 }
3494
3495 if (!op_found)
3496 return SUCCESS;
3497
3498 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3499 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3500 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3501 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3502 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3503 ipr_cmd->u.sdev = scsi_cmd->device;
3504
3505 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3506 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3507 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3508
3509 /*
3510 * If the abort task timed out and we sent a bus reset, we will get
3511	 * one of the following responses to the abort
3512 */
3513 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3514 ioasc = 0;
3515 ipr_trace;
3516 }
3517
3518 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3519	if (!ipr_is_naca_model(res))
3520 res->needs_sync_complete = 1;
3521
3522 LEAVE;
3523 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3524}
3525
3526/**
3527 * ipr_eh_abort - Abort a single op
3528 * @scsi_cmd: scsi command struct
3529 *
3530 * Return value:
3531 * SUCCESS / FAILED
3532 **/
3533static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3534{
3535	unsigned long flags;
3536 int rc;
3537
3538 ENTER;
3539
3540	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3541 rc = ipr_cancel_op(scsi_cmd);
3542 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3543
3544 LEAVE;
3545	return rc;
3546}
3547
3548/**
3549 * ipr_handle_other_interrupt - Handle "other" interrupts
3550 * @ioa_cfg: ioa config struct
3551 * @int_reg: interrupt register
3552 *
3553 * Return value:
3554 * IRQ_NONE / IRQ_HANDLED
3555 **/
3556static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3557 volatile u32 int_reg)
3558{
3559 irqreturn_t rc = IRQ_HANDLED;
3560
3561 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3562 /* Mask the interrupt */
3563 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3564
3565 /* Clear the interrupt */
3566 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3567 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3568
3569 list_del(&ioa_cfg->reset_cmd->queue);
3570 del_timer(&ioa_cfg->reset_cmd->timer);
3571 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3572 } else {
3573 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3574 ioa_cfg->ioa_unit_checked = 1;
3575 else
3576 dev_err(&ioa_cfg->pdev->dev,
3577 "Permanent IOA failure. 0x%08X\n", int_reg);
3578
3579 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3580 ioa_cfg->sdt_state = GET_DUMP;
3581
3582 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3583 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3584 }
3585
3586 return rc;
3587}
3588
3589/**
3590 * ipr_isr - Interrupt service routine
3591 * @irq: irq number
3592 * @devp: pointer to ioa config struct
3593 * @regs: pt_regs struct
3594 *
3595 * Return value:
3596 * IRQ_NONE / IRQ_HANDLED
3597 **/
3598static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3599{
3600 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3601 unsigned long lock_flags = 0;
3602 volatile u32 int_reg, int_mask_reg;
3603 u32 ioasc;
3604 u16 cmd_index;
3605 struct ipr_cmnd *ipr_cmd;
3606 irqreturn_t rc = IRQ_NONE;
3607
3608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3609
3610 /* If interrupts are disabled, ignore the interrupt */
3611 if (!ioa_cfg->allow_interrupts) {
3612 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3613 return IRQ_NONE;
3614 }
3615
3616 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3617 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3618
3619	/* If this interrupt did not originate from the adapter, it is not ours; ignore it */
3620 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3622 return IRQ_NONE;
3623 }
3624
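	/*
	 * Walk the host request response queue (HRRQ). Each entry whose
	 * toggle bit matches ours is a new response; when the queue wraps,
	 * the expected toggle bit is flipped so stale entries are ignored.
	 */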
3625 while (1) {
3626 ipr_cmd = NULL;
3627
3628 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3629 ioa_cfg->toggle_bit) {
3630
3631 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3632 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3633
3634 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3635 ioa_cfg->errors_logged++;
3636 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3637
3638 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3639 ioa_cfg->sdt_state = GET_DUMP;
3640
3641 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3642 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3643 return IRQ_HANDLED;
3644 }
3645
3646 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3647
3648 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3649
3650 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3651
3652 list_del(&ipr_cmd->queue);
3653 del_timer(&ipr_cmd->timer);
3654 ipr_cmd->done(ipr_cmd);
3655
3656 rc = IRQ_HANDLED;
3657
3658 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3659 ioa_cfg->hrrq_curr++;
3660 } else {
3661 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3662 ioa_cfg->toggle_bit ^= 1u;
3663 }
3664 }
3665
3666 if (ipr_cmd != NULL) {
3667 /* Clear the PCI interrupt */
3668 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3669 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3670 } else
3671 break;
3672 }
3673
3674 if (unlikely(rc == IRQ_NONE))
3675 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3676
3677 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3678 return rc;
3679}
3680
3681/**
3682 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3683 * @ioa_cfg: ioa config struct
3684 * @ipr_cmd: ipr command struct
3685 *
3686 * Return value:
3687 * 0 on success / -1 on failure
3688 **/
3689static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3690 struct ipr_cmnd *ipr_cmd)
3691{
3692 int i;
3693 struct scatterlist *sglist;
3694 u32 length;
3695 u32 ioadl_flags = 0;
3696 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3697 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3698 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3699
3700 length = scsi_cmd->request_bufflen;
3701
3702 if (length == 0)
3703 return 0;
3704
3705 if (scsi_cmd->use_sg) {
3706 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3707 scsi_cmd->request_buffer,
3708 scsi_cmd->use_sg,
3709 scsi_cmd->sc_data_direction);
3710
3711 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3712 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3713 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3714 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3715 ioarcb->write_ioadl_len =
3716 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3717 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3718 ioadl_flags = IPR_IOADL_FLAGS_READ;
3719 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3720 ioarcb->read_ioadl_len =
3721 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3722 }
3723
3724 sglist = scsi_cmd->request_buffer;
3725
3726 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3727 ioadl[i].flags_and_data_len =
3728 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3729 ioadl[i].address =
3730 cpu_to_be32(sg_dma_address(&sglist[i]));
3731 }
3732
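		/* The adapter requires the final IOADL descriptor to carry the LAST flag */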
3733 if (likely(ipr_cmd->dma_use_sg)) {
3734 ioadl[i-1].flags_and_data_len |=
3735 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3736 return 0;
3737 } else
3738 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3739 } else {
3740 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3741 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3742 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3743 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3744 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3745 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3746 ioadl_flags = IPR_IOADL_FLAGS_READ;
3747 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3748 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3749 }
3750
3751 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3752 scsi_cmd->request_buffer, length,
3753 scsi_cmd->sc_data_direction);
3754
3755 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3756 ipr_cmd->dma_use_sg = 1;
3757 ioadl[0].flags_and_data_len =
3758 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3759 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3760 return 0;
3761 } else
3762 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3763 }
3764
3765 return -1;
3766}
3767
3768/**
3769 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3770 * @scsi_cmd: scsi command struct
3771 *
3772 * Return value:
3773 * task attributes
3774 **/
3775static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3776{
3777 u8 tag[2];
3778 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3779
3780 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3781 switch (tag[0]) {
3782 case MSG_SIMPLE_TAG:
3783 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3784 break;
3785 case MSG_HEAD_TAG:
3786 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3787 break;
3788 case MSG_ORDERED_TAG:
3789 rc = IPR_FLAGS_LO_ORDERED_TASK;
3790 break;
3791		}
3792 }
3793
3794 return rc;
3795}
3796
3797/**
3798 * ipr_erp_done - Process completion of ERP for a device
3799 * @ipr_cmd: ipr command struct
3800 *
3801 * This function copies the sense buffer into the scsi_cmd
3802 * struct and invokes the scsi_done function.
3803 *
3804 * Return value:
3805 * nothing
3806 **/
3807static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3808{
3809 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3810 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3811 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3812 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3813
3814 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3815 scsi_cmd->result |= (DID_ERROR << 16);
3816 ipr_sdev_err(scsi_cmd->device,
3817 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3818 } else {
3819 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3820 SCSI_SENSE_BUFFERSIZE);
3821 }
3822
3823 if (res) {
3824		if (!ipr_is_naca_model(res))
3825 res->needs_sync_complete = 1;
3826		res->in_erp = 0;
3827 }
3828 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3829 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3830 scsi_cmd->scsi_done(scsi_cmd);
3831}
3832
3833/**
3834 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3835 * @ipr_cmd: ipr command struct
3836 *
3837 * Return value:
3838 * none
3839 **/
3840static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3841{
3842 struct ipr_ioarcb *ioarcb;
3843 struct ipr_ioasa *ioasa;
3844
3845 ioarcb = &ipr_cmd->ioarcb;
3846 ioasa = &ipr_cmd->ioasa;
3847
3848 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3849 ioarcb->write_data_transfer_length = 0;
3850 ioarcb->read_data_transfer_length = 0;
3851 ioarcb->write_ioadl_len = 0;
3852 ioarcb->read_ioadl_len = 0;
3853 ioasa->ioasc = 0;
3854 ioasa->residual_data_len = 0;
3855}
3856
3857/**
3858 * ipr_erp_request_sense - Send request sense to a device
3859 * @ipr_cmd: ipr command struct
3860 *
3861 * This function sends a request sense to a device as a result
3862 * of a check condition.
3863 *
3864 * Return value:
3865 * nothing
3866 **/
3867static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3868{
3869 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3870 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3871
3872 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3873 ipr_erp_done(ipr_cmd);
3874 return;
3875 }
3876
3877 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3878
3879 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3880 cmd_pkt->cdb[0] = REQUEST_SENSE;
3881 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3882 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3883 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3884 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3885
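	/* Point the single read descriptor at the command block's preallocated sense buffer */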
3886 ipr_cmd->ioadl[0].flags_and_data_len =
3887 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3888 ipr_cmd->ioadl[0].address =
3889 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3890
3891 ipr_cmd->ioarcb.read_ioadl_len =
3892 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3893 ipr_cmd->ioarcb.read_data_transfer_length =
3894 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3895
3896 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3897 IPR_REQUEST_SENSE_TIMEOUT * 2);
3898}
3899
3900/**
3901 * ipr_erp_cancel_all - Send cancel all to a device
3902 * @ipr_cmd: ipr command struct
3903 *
3904 * This function sends a cancel all to a device to clear the
3905 * queue. If we are running TCQ on the device, QERR is set to 1,
3906 * which means all outstanding ops have been dropped on the floor.
3907 * Cancel all will return them to us.
3908 *
3909 * Return value:
3910 * nothing
3911 **/
3912static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3913{
3914 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3915 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3916 struct ipr_cmd_pkt *cmd_pkt;
3917
3918 res->in_erp = 1;
3919
3920 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3921
3922 if (!scsi_get_tag_type(scsi_cmd->device)) {
3923 ipr_erp_request_sense(ipr_cmd);
3924 return;
3925 }
3926
3927 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3928 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3929 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3930
3931 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3932 IPR_CANCEL_ALL_TIMEOUT);
3933}
3934
3935/**
3936 * ipr_dump_ioasa - Dump contents of IOASA
3937 * @ioa_cfg: ioa config struct
3938 * @ipr_cmd: ipr command struct
3939 *
3940 * This function is invoked by the interrupt handler when ops
3941 * fail. It will log the IOASA if appropriate. Only called
3942 * for GPDD ops.
3943 *
3944 * Return value:
3945 * none
3946 **/
3947static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3948 struct ipr_cmnd *ipr_cmd)
3949{
3950 int i;
3951 u16 data_len;
3952 u32 ioasc;
3953 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3954 __be32 *ioasa_data = (__be32 *)ioasa;
3955 int error_index;
3956
3957 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3958
3959 if (0 == ioasc)
3960 return;
3961
3962 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3963 return;
3964
3965 error_index = ipr_get_error(ioasc);
3966
3967 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3968 /* Don't log an error if the IOA already logged one */
3969 if (ioasa->ilid != 0)
3970 return;
3971
3972 if (ipr_error_table[error_index].log_ioasa == 0)
3973 return;
3974 }
3975
3976 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3977 ipr_error_table[error_index].error);
3978
3979 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3980 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3981 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3982 "Device End state: %s Phase: %s\n",
3983 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3984 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3985 }
3986
3987 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3988 data_len = sizeof(struct ipr_ioasa);
3989 else
3990 data_len = be16_to_cpu(ioasa->ret_stat_len);
3991
3992 ipr_err("IOASA Dump:\n");
3993
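	/* ioasa_data[] holds 32-bit words; each line prints four words, labeled with byte offset i * 4 */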
3994 for (i = 0; i < data_len / 4; i += 4) {
3995 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3996 be32_to_cpu(ioasa_data[i]),
3997 be32_to_cpu(ioasa_data[i+1]),
3998 be32_to_cpu(ioasa_data[i+2]),
3999 be32_to_cpu(ioasa_data[i+3]));
4000 }
4001}
4002
4003/**
4004 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4005 * @ipr_cmd: ipr command struct
4007 *
4008 * Return value:
4009 * none
4010 **/
4011static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4012{
4013 u32 failing_lba;
4014 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4015 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4016 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4017 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4018
4019 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4020
4021 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4022 return;
4023
4024 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4025
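	/*
	 * Volume set LBAs can exceed 32 bits. When the failing LBA does,
	 * build descriptor format (0x72) sense data with an information
	 * descriptor so the full 64-bit LBA can be reported.
	 */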
4026 if (ipr_is_vset_device(res) &&
4027 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4028 ioasa->u.vset.failing_lba_hi != 0) {
4029 sense_buf[0] = 0x72;
4030 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4031 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4032 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4033
4034 sense_buf[7] = 12;
4035 sense_buf[8] = 0;
4036 sense_buf[9] = 0x0A;
4037 sense_buf[10] = 0x80;
4038
4039 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4040
4041 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4042 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4043 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4044 sense_buf[15] = failing_lba & 0x000000ff;
4045
4046 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4047
4048 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4049 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4050 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4051 sense_buf[19] = failing_lba & 0x000000ff;
4052 } else {
4053 sense_buf[0] = 0x70;
4054 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4055 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4056 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4057
4058 /* Illegal request */
4059 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4060 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4061 sense_buf[7] = 10; /* additional length */
4062
4063 /* IOARCB was in error */
4064 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4065 sense_buf[15] = 0xC0;
4066 else /* Parameter data was invalid */
4067 sense_buf[15] = 0x80;
4068
4069 sense_buf[16] =
4070 ((IPR_FIELD_POINTER_MASK &
4071 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4072 sense_buf[17] =
4073 (IPR_FIELD_POINTER_MASK &
4074 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4075 } else {
4076 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4077 if (ipr_is_vset_device(res))
4078 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4079 else
4080 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4081
4082 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4083 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4084 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4085 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4086 sense_buf[6] = failing_lba & 0x000000ff;
4087 }
4088
4089 sense_buf[7] = 6; /* additional length */
4090 }
4091 }
4092}
4093
4094/**
4095 * ipr_get_autosense - Copy autosense data to sense buffer
4096 * @ipr_cmd: ipr command struct
4097 *
4098 * This function copies the autosense buffer to the buffer
4099 * in the scsi_cmd, if there is autosense available.
4100 *
4101 * Return value:
4102 * 1 if autosense was available / 0 if not
4103 **/
4104static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4105{
4106 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4107
4108 if ((be32_to_cpu(ioasa->ioasc_specific) &
4109 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4110 return 0;
4111
4112 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4113 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4114 SCSI_SENSE_BUFFERSIZE));
4115 return 1;
4116}
4117
4118/**
4119 * ipr_erp_start - Process an error response for a SCSI op
4120 * @ioa_cfg: ioa config struct
4121 * @ipr_cmd: ipr command struct
4122 *
4123 * This function determines whether or not to initiate ERP
4124 * on the affected device.
4125 *
4126 * Return value:
4127 * nothing
4128 **/
4129static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4130 struct ipr_cmnd *ipr_cmd)
4131{
4132 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4133 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4134 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4135
4136 if (!res) {
4137 ipr_scsi_eh_done(ipr_cmd);
4138 return;
4139 }
4140
4141 if (ipr_is_gscsi(res))
4142 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
4143 else
4144 ipr_gen_sense(ipr_cmd);
4145
4146 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4147 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4148		if (ipr_is_naca_model(res))
4149 scsi_cmd->result |= (DID_ABORT << 16);
4150 else
4151 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4152		break;
4153 case IPR_IOASC_IR_RESOURCE_HANDLE:
4154	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4155		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4156 break;
4157 case IPR_IOASC_HW_SEL_TIMEOUT:
4158 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4159		if (!ipr_is_naca_model(res))
4160 res->needs_sync_complete = 1;
4161		break;
4162 case IPR_IOASC_SYNC_REQUIRED:
4163 if (!res->in_erp)
4164 res->needs_sync_complete = 1;
4165 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4166 break;
4167 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4168	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4169		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4170 break;
4171 case IPR_IOASC_BUS_WAS_RESET:
4172 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4173 /*
4174 * Report the bus reset and ask for a retry. The device
4175 * will give CC/UA the next command.
4176 */
4177 if (!res->resetting_device)
4178 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4179 scsi_cmd->result |= (DID_ERROR << 16);
4180		if (!ipr_is_naca_model(res))
4181 res->needs_sync_complete = 1;
4182		break;
4183 case IPR_IOASC_HW_DEV_BUS_STATUS:
4184 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4185 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4186			if (!ipr_get_autosense(ipr_cmd)) {
4187 if (!ipr_is_naca_model(res)) {
4188 ipr_erp_cancel_all(ipr_cmd);
4189 return;
4190 }
4191 }
4192		}
4193		if (!ipr_is_naca_model(res))
4194 res->needs_sync_complete = 1;
4195		break;
4196 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4197 break;
4198 default:
4199 scsi_cmd->result |= (DID_ERROR << 16);
4200		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4201			res->needs_sync_complete = 1;
4202 break;
4203 }
4204
4205 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4206 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4207 scsi_cmd->scsi_done(scsi_cmd);
4208}
4209
4210/**
4211 * ipr_scsi_done - mid-layer done function
4212 * @ipr_cmd: ipr command struct
4213 *
4214 * This function is invoked by the interrupt handler for
4215 * ops generated by the SCSI mid-layer
4216 *
4217 * Return value:
4218 * none
4219 **/
4220static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4221{
4222 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4223 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4224 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4225
4226 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4227
4228 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4229 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4230 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4231 scsi_cmd->scsi_done(scsi_cmd);
4232 } else
4233 ipr_erp_start(ioa_cfg, ipr_cmd);
4234}
4235
4236/**
4237 * ipr_save_ioafp_mode_select - Save the adapter's mode select data
4238 * @ioa_cfg: ioa config struct
4239 * @scsi_cmd: scsi command struct
4240 *
4241 * This function saves mode select data for the adapter to
4242 * use following an adapter reset.
4243 *
4244 * Return value:
4245 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
4246 **/
4247static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
4248 struct scsi_cmnd *scsi_cmd)
4249{
4250 if (!ioa_cfg->saved_mode_pages) {
4251 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
4252 GFP_ATOMIC);
4253 if (!ioa_cfg->saved_mode_pages) {
4254 dev_err(&ioa_cfg->pdev->dev,
4255 "IOA mode select buffer allocation failed\n");
4256 return SCSI_MLQUEUE_HOST_BUSY;
4257 }
4258 }
4259
4260 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
4261 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
4262 return 0;
4263}
4264
4265/**
4266 * ipr_queuecommand - Queue a mid-layer request
4267 * @scsi_cmd: scsi command struct
4268 * @done: done function
4269 *
4270 * This function queues a request generated by the mid-layer.
4271 *
4272 * Return value:
4273 * 0 on success
4274 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4275 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4276 **/
4277static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4278 void (*done) (struct scsi_cmnd *))
4279{
4280 struct ipr_ioa_cfg *ioa_cfg;
4281 struct ipr_resource_entry *res;
4282 struct ipr_ioarcb *ioarcb;
4283 struct ipr_cmnd *ipr_cmd;
4284 int rc = 0;
4285
4286 scsi_cmd->scsi_done = done;
4287 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4288 res = scsi_cmd->device->hostdata;
4289 scsi_cmd->result = (DID_OK << 16);
4290
4291 /*
4292 * We are currently blocking all devices due to a host reset
4293 * We have told the host to stop giving us new requests, but
4294 * ERP ops don't count. FIXME
4295 */
4296 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4297 return SCSI_MLQUEUE_HOST_BUSY;
4298
4299 /*
4300 * FIXME - Create scsi_set_host_offline interface
4301 * and the ioa_is_dead check can be removed
4302 */
4303 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4304 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4305 scsi_cmd->result = (DID_NO_CONNECT << 16);
4306 scsi_cmd->scsi_done(scsi_cmd);
4307 return 0;
4308 }
4309
4310 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4311 ioarcb = &ipr_cmd->ioarcb;
4312 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4313
4314 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4315 ipr_cmd->scsi_cmd = scsi_cmd;
4316 ioarcb->res_handle = res->cfgte.res_handle;
4317 ipr_cmd->done = ipr_scsi_done;
4318 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4319
4320 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4321 if (scsi_cmd->underflow == 0)
4322 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4323
4324 if (res->needs_sync_complete) {
4325 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4326 res->needs_sync_complete = 0;
4327 }
4328
4329 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4330 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4331 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4332 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4333 }
4334
4335 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4336 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4337 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4338
4339 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4340 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4341
4342 if (likely(rc == 0))
4343 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4344
4345 if (likely(rc == 0)) {
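		/* Make the IOARCB and IOADL visible to the adapter before ringing the doorbell */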
4346 mb();
4347 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4348 ioa_cfg->regs.ioarrin_reg);
4349 } else {
4350 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4351 return SCSI_MLQUEUE_HOST_BUSY;
4352 }
4353
4354 return 0;
4355}
4356
4357/**
4358 * ipr_ioa_info - Get information about the card/driver
4359 * @host: scsi host struct
4360 *
4361 * Return value:
4362 * pointer to buffer with description string
4363 **/
4364static const char * ipr_ioa_info(struct Scsi_Host *host)
4365{
4366 static char buffer[512];
4367 struct ipr_ioa_cfg *ioa_cfg;
4368 unsigned long lock_flags = 0;
4369
4370 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4371
4372 spin_lock_irqsave(host->host_lock, lock_flags);
4373 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4374 spin_unlock_irqrestore(host->host_lock, lock_flags);
4375
4376 return buffer;
4377}
4378
4379static struct scsi_host_template driver_template = {
4380 .module = THIS_MODULE,
4381 .name = "IPR",
4382 .info = ipr_ioa_info,
4383 .queuecommand = ipr_queuecommand,
4384 .eh_abort_handler = ipr_eh_abort,
4385 .eh_device_reset_handler = ipr_eh_dev_reset,
4386 .eh_host_reset_handler = ipr_eh_host_reset,
4387 .slave_alloc = ipr_slave_alloc,
4388 .slave_configure = ipr_slave_configure,
4389 .slave_destroy = ipr_slave_destroy,
4390 .change_queue_depth = ipr_change_queue_depth,
4391 .change_queue_type = ipr_change_queue_type,
4392 .bios_param = ipr_biosparam,
4393 .can_queue = IPR_MAX_COMMANDS,
4394 .this_id = -1,
4395 .sg_tablesize = IPR_MAX_SGLIST,
4396 .max_sectors = IPR_IOA_MAX_SECTORS,
4397 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4398 .use_clustering = ENABLE_CLUSTERING,
4399 .shost_attrs = ipr_ioa_attrs,
4400 .sdev_attrs = ipr_dev_attrs,
4401 .proc_name = IPR_NAME
4402};
4403
4404#ifdef CONFIG_PPC_PSERIES
4405static const u16 ipr_blocked_processors[] = {
4406 PV_NORTHSTAR,
4407 PV_PULSAR,
4408 PV_POWER4,
4409 PV_ICESTAR,
4410 PV_SSTAR,
4411 PV_POWER4p,
4412 PV_630,
4413 PV_630p
4414};
4415
4416/**
4417 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4418 * @ioa_cfg: ioa cfg struct
4419 *
4420 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4421 * certain pSeries hardware. This function determines if the given
4422 * adapter is in one of these configurations or not.
4423 *
4424 * Return value:
4425 * 1 if adapter is not supported / 0 if adapter is supported
4426 **/
4427static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4428{
4429 u8 rev_id;
4430 int i;
4431
4432 if (ioa_cfg->type == 0x5702) {
4433 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4434 &rev_id) == PCIBIOS_SUCCESSFUL) {
4435 if (rev_id < 4) {
4436 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4437 if (__is_processor(ipr_blocked_processors[i]))
4438 return 1;
4439 }
4440 }
4441 }
4442 }
4443 return 0;
4444}
4445#else
4446#define ipr_invalid_adapter(ioa_cfg) 0
4447#endif
4448
4449/**
4450 * ipr_ioa_bringdown_done - IOA bring down completion.
4451 * @ipr_cmd: ipr command struct
4452 *
4453 * This function processes the completion of an adapter bring down.
4454 * It wakes any reset sleepers.
4455 *
4456 * Return value:
4457 * IPR_RC_JOB_RETURN
4458 **/
4459static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4460{
4461 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4462
4463 ENTER;
4464 ioa_cfg->in_reset_reload = 0;
4465 ioa_cfg->reset_retries = 0;
4466 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4467 wake_up_all(&ioa_cfg->reset_wait_q);
4468
4469 spin_unlock_irq(ioa_cfg->host->host_lock);
4470 scsi_unblock_requests(ioa_cfg->host);
4471 spin_lock_irq(ioa_cfg->host->host_lock);
4472 LEAVE;
4473
4474 return IPR_RC_JOB_RETURN;
4475}
4476
4477/**
4478 * ipr_ioa_reset_done - IOA reset completion.
4479 * @ipr_cmd: ipr command struct
4480 *
4481 * This function processes the completion of an adapter reset.
4482 * It schedules any necessary mid-layer add/removes and
4483 * wakes any reset sleepers.
4484 *
4485 * Return value:
4486 * IPR_RC_JOB_RETURN
4487 **/
4488static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4489{
4490 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4491 struct ipr_resource_entry *res;
4492 struct ipr_hostrcb *hostrcb, *temp;
4493 int i = 0;
4494
4495 ENTER;
4496 ioa_cfg->in_reset_reload = 0;
4497 ioa_cfg->allow_cmds = 1;
4498 ioa_cfg->reset_cmd = NULL;
4499	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4500
4501 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4502 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4503 ipr_trace;
4504 break;
4505 }
4506 }
4507 schedule_work(&ioa_cfg->work_q);
4508
4509 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4510 list_del(&hostrcb->queue);
4511 if (i++ < IPR_NUM_LOG_HCAMS)
4512 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4513 else
4514 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4515 }
4516
4517 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4518
4519 ioa_cfg->reset_retries = 0;
4520 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4521 wake_up_all(&ioa_cfg->reset_wait_q);
4522
4523 spin_unlock_irq(ioa_cfg->host->host_lock);
4524 scsi_unblock_requests(ioa_cfg->host);
4525 spin_lock_irq(ioa_cfg->host->host_lock);
4526
4527 if (!ioa_cfg->allow_cmds)
4528 scsi_block_requests(ioa_cfg->host);
4529
4530 LEAVE;
4531 return IPR_RC_JOB_RETURN;
4532}
4533
4534/**
4535 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4536 * @supported_dev: supported device struct
4537 * @vpids: vendor product id struct
4538 *
4539 * Return value:
4540 * none
4541 **/
4542static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4543 struct ipr_std_inq_vpids *vpids)
4544{
4545 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4546 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4547 supported_dev->num_records = 1;
4548 supported_dev->data_length =
4549 cpu_to_be16(sizeof(struct ipr_supported_device));
4550 supported_dev->reserved = 0;
4551}
4552
4553/**
4554 * ipr_set_supported_devs - Send Set Supported Devices for a device
4555 * @ipr_cmd: ipr command struct
4556 *
4557 * This function sends a Set Supported Devices to the adapter
4558 *
4559 * Return value:
4560 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4561 **/
4562static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4563{
4564 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4565 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4566 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4567 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4568 struct ipr_resource_entry *res = ipr_cmd->u.res;
4569
4570 ipr_cmd->job_step = ipr_ioa_reset_done;
4571
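	/*
	 * Resume the walk where the previous invocation of this job step
	 * left off: one Set Supported Devices is issued per qualifying
	 * device, re-entering this step after each one completes.
	 */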
4572 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4573		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4574			continue;
4575
4576 ipr_cmd->u.res = res;
4577 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4578
4579 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4580 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4581 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4582
4583 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4584 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4585 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4586
4587 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4588 sizeof(struct ipr_supported_device));
4589 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4590 offsetof(struct ipr_misc_cbs, supp_dev));
4591 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4592 ioarcb->write_data_transfer_length =
4593 cpu_to_be32(sizeof(struct ipr_supported_device));
4594
4595 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4596 IPR_SET_SUP_DEVICE_TIMEOUT);
4597
4598 ipr_cmd->job_step = ipr_set_supported_devs;
4599 return IPR_RC_JOB_RETURN;
4600 }
4601
4602 return IPR_RC_JOB_CONTINUE;
4603}
4604
4605/**
4606 * ipr_setup_write_cache - Disable write cache if needed
4607 * @ipr_cmd: ipr command struct
4608 *
4609 * This function sets up the adapter's write cache to the desired setting
4610 *
4611 * Return value:
4612 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4613 **/
4614static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4615{
4616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4617
4618 ipr_cmd->job_step = ipr_set_supported_devs;
4619 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4620 struct ipr_resource_entry, queue);
4621
4622 if (ioa_cfg->cache_state != CACHE_DISABLED)
4623 return IPR_RC_JOB_CONTINUE;
4624
4625 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4626 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4627 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4628 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4629
4630 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4631
4632 return IPR_RC_JOB_RETURN;
4633}
4634
4635/**
4636 * ipr_get_mode_page - Locate specified mode page
4637 * @mode_pages: mode page buffer
4638 * @page_code: page code to find
4639 * @len: minimum required length for mode page
4640 *
4641 * Return value:
4642 * pointer to mode page / NULL on failure
4643 **/
4644static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4645 u32 page_code, u32 len)
4646{
4647 struct ipr_mode_page_hdr *mode_hdr;
4648 u32 page_length;
4649 u32 length;
4650
4651 if (!mode_pages || (mode_pages->hdr.length == 0))
4652 return NULL;
4653
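	/* The mode data length byte excludes itself, so the page data following
	 * the 4-byte parameter header and any block descriptors is
	 * (hdr.length + 1) - 4 - block_desc_len bytes long. */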
4654 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4655 mode_hdr = (struct ipr_mode_page_hdr *)
4656 (mode_pages->data + mode_pages->hdr.block_desc_len);
4657
4658 while (length) {
4659 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4660 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4661 return mode_hdr;
4662 break;
4663 } else {
4664 page_length = (sizeof(struct ipr_mode_page_hdr) +
4665 mode_hdr->page_length);
4666 length -= page_length;
4667 mode_hdr = (struct ipr_mode_page_hdr *)
4668 ((unsigned long)mode_hdr + page_length);
4669 }
4670 }
4671 return NULL;
4672}
4673
4674/**
4675 * ipr_check_term_power - Check for term power errors
4676 * @ioa_cfg: ioa config struct
4677 * @mode_pages: IOAFP mode pages buffer
4678 *
4679 * Check the IOAFP's mode page 28 for term power errors
4680 *
4681 * Return value:
4682 * nothing
4683 **/
4684static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4685 struct ipr_mode_pages *mode_pages)
4686{
4687 int i;
4688 int entry_length;
4689 struct ipr_dev_bus_entry *bus;
4690 struct ipr_mode_page28 *mode_page;
4691
4692 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4693 sizeof(struct ipr_mode_page28));
4694
4695 entry_length = mode_page->entry_length;
4696
4697 bus = mode_page->bus;
4698
4699 for (i = 0; i < mode_page->num_entries; i++) {
4700 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4701 dev_err(&ioa_cfg->pdev->dev,
4702 "Term power is absent on scsi bus %d\n",
4703 bus->res_addr.bus);
4704 }
4705
4706 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4707 }
4708}
4709
4710/**
4711 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4712 * @ioa_cfg: ioa config struct
4713 *
4714 * Looks through the config table checking for SES devices. If
4715 * an SES device has an entry in the SES table specifying a maximum
4716 * SCSI bus speed, the bus speed is limited accordingly.
4717 *
4718 * Return value:
4719 * none
4720 **/
4721static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4722{
4723 u32 max_xfer_rate;
4724 int i;
4725
4726 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4727 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4728 ioa_cfg->bus_attr[i].bus_width);
4729
4730 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4731 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4732 }
4733}
4734
4735/**
4736 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4737 * @ioa_cfg: ioa config struct
4738 * @mode_pages: mode page 28 buffer
4739 *
4740 * Updates mode page 28 based on driver configuration
4741 *
4742 * Return value:
4743 * none
4744 **/
4745static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4746 struct ipr_mode_pages *mode_pages)
4747{
4748 int i, entry_length;
4749 struct ipr_dev_bus_entry *bus;
4750 struct ipr_bus_attributes *bus_attr;
4751 struct ipr_mode_page28 *mode_page;
4752
4753 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4754 sizeof(struct ipr_mode_page28));
4755
4756 entry_length = mode_page->entry_length;
4757
4758 /* Loop for each device bus entry */
4759 for (i = 0, bus = mode_page->bus;
4760 i < mode_page->num_entries;
4761 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4762 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4763 dev_err(&ioa_cfg->pdev->dev,
4764 "Invalid resource address reported: 0x%08X\n",
4765 IPR_GET_PHYS_LOC(bus->res_addr));
4766 continue;
4767 }
4768
4769 bus_attr = &ioa_cfg->bus_attr[i];
4770 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4771 bus->bus_width = bus_attr->bus_width;
4772 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4773 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4774 if (bus_attr->qas_enabled)
4775 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4776 else
4777 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4778 }
4779}
4780
4781/**
4782 * ipr_build_mode_select - Build a mode select command
4783 * @ipr_cmd: ipr command struct
4784 * @res_handle: resource handle to send command to
4785 * @parm: Byte 2 of the Mode Select command
4786 * @dma_addr: DMA buffer address
4787 * @xfer_len: data transfer length
4788 *
4789 * Return value:
4790 * none
4791 **/
4792static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4793 __be32 res_handle, u8 parm, u32 dma_addr,
4794 u8 xfer_len)
4795{
4796 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4797 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4798
4799 ioarcb->res_handle = res_handle;
4800 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4801 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4802 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4803 ioarcb->cmd_pkt.cdb[1] = parm;
4804 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4805
4806 ioadl->flags_and_data_len =
4807 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4808 ioadl->address = cpu_to_be32(dma_addr);
4809 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4810 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4811}
4812
4813/**
4814 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4815 * @ipr_cmd: ipr command struct
4816 *
4817 * This function sets up the SCSI bus attributes and sends
4818 * a Mode Select for Page 28 to activate them.
4819 *
4820 * Return value:
4821 * IPR_RC_JOB_RETURN
4822 **/
4823static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4824{
4825 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4826 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4827 int length;
4828
4829 ENTER;
4830 if (ioa_cfg->saved_mode_pages) {
4831 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4832 ioa_cfg->saved_mode_page_len);
4833 length = ioa_cfg->saved_mode_page_len;
4834 } else {
4835 ipr_scsi_bus_speed_limit(ioa_cfg);
4836 ipr_check_term_power(ioa_cfg, mode_pages);
4837 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4838 length = mode_pages->hdr.length + 1;
4839 mode_pages->hdr.length = 0;
4840 }
4841
4842 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4843 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4844 length);
4845
4846	ipr_cmd->job_step = ipr_setup_write_cache;
4847	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4848
4849 LEAVE;
4850 return IPR_RC_JOB_RETURN;
4851}
4852
4853/**
4854 * ipr_build_mode_sense - Builds a mode sense command
4855 * @ipr_cmd: ipr command struct
4856 * @res_handle: resource handle to send command to
4857 * @parm: Byte 2 of mode sense command
4858 * @dma_addr: DMA address of mode sense buffer
4859 * @xfer_len: Size of DMA buffer
4860 *
4861 * Return value:
4862 * none
4863 **/
4864static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4865 __be32 res_handle,
4866 u8 parm, u32 dma_addr, u8 xfer_len)
4867{
4868 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4869 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4870
4871 ioarcb->res_handle = res_handle;
4872 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4873 ioarcb->cmd_pkt.cdb[2] = parm;
4874 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4875 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4876
4877 ioadl->flags_and_data_len =
4878 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4879 ioadl->address = cpu_to_be32(dma_addr);
4880 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4881 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4882}
4883
4884/**
4885 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4886 * @ipr_cmd: ipr command struct
4887 *
4888 * This function sends a Page 28 mode sense to the IOA to
4889 * retrieve SCSI bus attributes.
4890 *
4891 * Return value:
4892 * IPR_RC_JOB_RETURN
4893 **/
4894static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4895{
4896 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4897
4898 ENTER;
4899 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4900 0x28, ioa_cfg->vpd_cbs_dma +
4901 offsetof(struct ipr_misc_cbs, mode_pages),
4902 sizeof(struct ipr_mode_pages));
4903
4904 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4905
4906 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4907
4908 LEAVE;
4909 return IPR_RC_JOB_RETURN;
4910}
4911
4912/**
4913 * ipr_init_res_table - Initialize the resource table
4914 * @ipr_cmd: ipr command struct
4915 *
4916 * This function looks through the existing resource table, comparing
4917 * it with the config table. This function will take care of old/new
4918 * devices and schedule adding/removing them from the mid-layer
4919 * as appropriate.
4920 *
4921 * Return value:
4922 * IPR_RC_JOB_CONTINUE
4923 **/
4924static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4925{
4926 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4927 struct ipr_resource_entry *res, *temp;
4928 struct ipr_config_table_entry *cfgte;
4929 int found, i;
4930 LIST_HEAD(old_res);
4931
4932 ENTER;
4933 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4934 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4935
4936 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4937 list_move_tail(&res->queue, &old_res);
4938
4939 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4940 cfgte = &ioa_cfg->cfg_table->dev[i];
4941 found = 0;
4942
4943 list_for_each_entry_safe(res, temp, &old_res, queue) {
4944 if (!memcmp(&res->cfgte.res_addr,
4945 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4946 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4947 found = 1;
4948 break;
4949 }
4950 }
4951
4952 if (!found) {
4953 if (list_empty(&ioa_cfg->free_res_q)) {
4954 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4955 break;
4956 }
4957
4958 found = 1;
4959 res = list_entry(ioa_cfg->free_res_q.next,
4960 struct ipr_resource_entry, queue);
4961 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4962 ipr_init_res_entry(res);
4963 res->add_to_ml = 1;
4964 }
4965
4966 if (found)
4967 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4968 }
4969
4970 list_for_each_entry_safe(res, temp, &old_res, queue) {
4971 if (res->sdev) {
4972 res->del_from_ml = 1;
4973 res->sdev->hostdata = NULL;
4974 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4975 } else {
4976 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4977 }
4978 }
4979
4980 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4981
4982 LEAVE;
4983 return IPR_RC_JOB_CONTINUE;
4984}
4985
4986/**
4987 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4988 * @ipr_cmd: ipr command struct
4989 *
4990 * This function sends a Query IOA Configuration command
4991 * to the adapter to retrieve the IOA configuration table.
4992 *
4993 * Return value:
4994 * IPR_RC_JOB_RETURN
4995 **/
4996static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4997{
4998 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4999 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5000 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5001 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5002
5003 ENTER;
5004 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5005 ucode_vpd->major_release, ucode_vpd->card_type,
5006 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5007 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5008 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5009
5010 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5011 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5012 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5013
5014 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5015 ioarcb->read_data_transfer_length =
5016 cpu_to_be32(sizeof(struct ipr_config_table));
5017
5018 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5019 ioadl->flags_and_data_len =
5020 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5021
5022 ipr_cmd->job_step = ipr_init_res_table;
5023
5024 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5025
5026 LEAVE;
5027 return IPR_RC_JOB_RETURN;
5028}
5029
5030/**
5031 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5032 * @ipr_cmd: ipr command struct
 * @flags: flags byte for the inquiry CDB (byte 1)
 * @page: inquiry page code (CDB byte 2)
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
5033 *
5034 * This utility function sends an inquiry to the adapter.
5035 *
5036 * Return value:
5037 * none
5038 **/
5039static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5040 u32 dma_addr, u8 xfer_len)
5041{
5042 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5043 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5044
5045 ENTER;
5046 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5047 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5048
5049 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5050 ioarcb->cmd_pkt.cdb[1] = flags;
5051 ioarcb->cmd_pkt.cdb[2] = page;
5052 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5053
5054 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5055 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5056
5057 ioadl->address = cpu_to_be32(dma_addr);
5058 ioadl->flags_and_data_len =
5059 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5060
5061 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5062 LEAVE;
5063}
5064
5065/**
5066 * ipr_inquiry_page_supported - Is the given inquiry page supported
5067 * @page0: inquiry page 0 buffer
5068 * @page: page code.
5069 *
5070 * This function determines if the specified inquiry page is supported.
5071 *
5072 * Return value:
5073 * 1 if page is supported / 0 if not
5074 **/
5075static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5076{
5077 int i;
5078
5079 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5080 if (page0->page[i] == page)
5081 return 1;
5082
5083 return 0;
5084}
5085
5086/**
5087 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5088 * @ipr_cmd: ipr command struct
5089 *
5090 * This function sends a Page 3 inquiry to the adapter
5091 * to retrieve software VPD information.
5092 *
5093 * Return value:
5094 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5095 **/
5096static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5097{
5098 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5099	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5100
5101 ENTER;
5102
5103 if (!ipr_inquiry_page_supported(page0, 1))
5104 ioa_cfg->cache_state = CACHE_NONE;
5105
5106 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5107
5108 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5109 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5110 sizeof(struct ipr_inquiry_page3));
5111
5112 LEAVE;
5113 return IPR_RC_JOB_RETURN;
5114}
5115
5116/**
5117 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5118 * @ipr_cmd: ipr command struct
5119 *
5120 * This function sends a Page 0 inquiry to the adapter
5121 * to retrieve supported inquiry pages.
5122 *
5123 * Return value:
5124 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5125 **/
5126static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5127{
5128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5129	char type[5];
5130
5131 ENTER;
5132
5133 /* Grab the type out of the VPD and store it away */
5134 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5135 type[4] = '\0';
5136 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5137
5138	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5139
5140	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5141 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5142 sizeof(struct ipr_inquiry_page0));
5143
5144 LEAVE;
5145 return IPR_RC_JOB_RETURN;
5146}
5147
5148/**
5149 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5150 * @ipr_cmd: ipr command struct
5151 *
5152 * This function sends a standard inquiry to the adapter.
5153 *
5154 * Return value:
5155 * IPR_RC_JOB_RETURN
5156 **/
5157static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5158{
5159 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5160
5161 ENTER;
5162	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5163
5164 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5165 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5166 sizeof(struct ipr_ioa_vpd));
5167
5168 LEAVE;
5169 return IPR_RC_JOB_RETURN;
5170}
5171
5172/**
5173 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5174 * @ipr_cmd: ipr command struct
5175 *
5176 * This function sends an Identify Host Request Response Queue
5177 * command to establish the HRRQ with the adapter.
5178 *
5179 * Return value:
5180 * IPR_RC_JOB_RETURN
5181 **/
5182static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5183{
5184 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5185 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5186
5187 ENTER;
5188 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5189
5190 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5191 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5192
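	/* Pass the host RRQ DMA address MSB-first in CDB bytes 2-5 and its size in bytes 7-8 */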
5193 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5194 ioarcb->cmd_pkt.cdb[2] =
5195 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5196 ioarcb->cmd_pkt.cdb[3] =
5197 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5198 ioarcb->cmd_pkt.cdb[4] =
5199 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5200 ioarcb->cmd_pkt.cdb[5] =
5201 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5202 ioarcb->cmd_pkt.cdb[7] =
5203 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5204 ioarcb->cmd_pkt.cdb[8] =
5205 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5206
5207 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5208
5209 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5210
5211 LEAVE;
5212 return IPR_RC_JOB_RETURN;
5213}
5214
5215/**
5216 * ipr_reset_timer_done - Adapter reset timer function
5217 * @ipr_cmd: ipr command struct
5218 *
5219 * Description: This function is used in adapter reset processing
5220 * for timing events. If the reset_cmd pointer in the IOA
5221 * config struct does not point at this command, we are doing nested
5222 * resets and fail_all_ops will take care of freeing the
5223 * command block.
5224 *
5225 * Return value:
5226 * none
5227 **/
5228static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5229{
5230 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5231 unsigned long lock_flags = 0;
5232
5233 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5234
5235 if (ioa_cfg->reset_cmd == ipr_cmd) {
5236 list_del(&ipr_cmd->queue);
5237 ipr_cmd->done(ipr_cmd);
5238 }
5239
5240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5241}
5242
5243/**
5244 * ipr_reset_start_timer - Start a timer for adapter reset job
5245 * @ipr_cmd: ipr command struct
5246 * @timeout: timeout value
5247 *
5248 * Description: This function is used in adapter reset processing
5249 * for timing events. If the reset_cmd pointer in the IOA
5250 * config struct is not this adapter's we are doing nested
5251 * resets and fail_all_ops will take care of freeing the
5252 * command block.
5253 *
5254 * Return value:
5255 * none
5256 **/
5257static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5258 unsigned long timeout)
5259{
5260 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5261 ipr_cmd->done = ipr_reset_ioa_job;
5262
5263 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5264 ipr_cmd->timer.expires = jiffies + timeout;
5265 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5266 add_timer(&ipr_cmd->timer);
5267}
5268
5269/**
5270 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5271 * @ioa_cfg: ioa cfg struct
5272 *
5273 * Return value:
5274 * nothing
5275 **/
5276static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5277{
5278 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5279
5280 /* Initialize Host RRQ pointers */
5281 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5282 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5283 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5284 ioa_cfg->toggle_bit = 1;
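	/*
	 * Responses posted by the IOA carry a toggle bit that flips each
	 * time the circular RRQ wraps; comparing it with toggle_bit lets
	 * the interrupt handler tell fresh entries from stale ones.
	 */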

	/* Zero out config table */
	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
}

/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

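	/*
	 * If the IOA has already signalled its transition to operational
	 * state, skip the wait: unmask the HRRQ and error interrupts and
	 * let the reset job continue immediately.
	 */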
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_wait_for_dump - Wait for a dump to time out.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 *	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}

/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

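	/*
	 * The copy above is capped at the size of the hostrcb buffer, so
	 * an oversized unit check buffer is truncated rather than
	 * overrunning the allocation.
	 */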
	if (!rc)
		ipr_handle_log_data(ioa_cfg, hostrcb);
	else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_unblock_user_cfg_access(ioa_cfg->pdev);
	rc = pci_restore_state(ioa_cfg->pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

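	/*
	 * If the adapter unit checked during the reset, fetch and log the
	 * unit check buffer now, then schedule another reset alert so the
	 * IOA comes back up clean.
	 */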
	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_block_user_cfg_access(ioa_cfg->pdev);
	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	} else {
		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
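		/*
		 * The timer gives BIST time to run before the next job
		 * step restores config space.
		 */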
		rc = IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

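	/*
	 * Poll every IPR_CHECK_FOR_RESET_TIMEOUT jiffies until the IOA
	 * drops its critical-operation indication or the time budget in
	 * u.time_left runs out, then start BIST regardless.
	 */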
	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

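	/*
	 * Build a SCSI WRITE BUFFER in download-and-save mode; CDB bytes
	 * 6-8 carry the 24-bit length of the microcode image.
	 */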
	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else
			timeout = IPR_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	unsigned long scratch = ipr_cmd->u.scratch;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

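	/*
	 * Each job step either returns IPR_RC_JOB_CONTINUE, in which case
	 * the next step runs immediately in this loop, or IPR_RC_JOB_RETURN
	 * after queueing async work whose completion re-enters this router.
	 */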
	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			dev_err(&ioa_cfg->pdev->dev,
				"0x%02X failed with IOASC: 0x%08X\n",
				ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->u.scratch = scratch;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}

/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

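	/*
	 * After IPR_NUM_RESET_RELOAD_RETRIES failed recovery attempts,
	 * give up and take the adapter offline rather than resetting it
	 * forever.
	 */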
	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->saved_mode_pages);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

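		/*
		 * Seed the IOARCB with the bus addresses the IOA needs:
		 * the command block itself, its scatter/gather list
		 * (IOADL), and its status area (IOASA).
		 */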
		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

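/*
 * Error unwind: the labels below run in reverse order of allocation,
 * so each failure path frees exactly what was set up before it.
 */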
out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	if (!ipr_auto_create)
		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

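	/*
	 * Translate the chip-specific register offsets into mapped
	 * addresses by adding them to the BAR 0 virtual base.
	 */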
	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;
	return NULL;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	u32 rc = PCIBIOS_SUCCESSFUL;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 *	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_module_init(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);