/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 * Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};
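
/*
 * Illustrative sketch only: a probe-time lookup over ipr_chip[] to find
 * the register layout for a given PCI device might look like the loop
 * below. The field names vendor/device/cfg are assumptions drawn from the
 * table initializers above; the driver's real lookup lives in its probe
 * path, not here.
 *
 *	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *		if (ipr_chip[i].vendor == pdev->vendor &&
 *		    ipr_chip[i].device == pdev->device)
 *			ioa_cfg->chip_cfg = ipr_chip[i].cfg;
 */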

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
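
/*
 * Example (illustrative, not part of the driver): the parameters above are
 * given at module load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=4 enable_cache=0
 *
 * All of them are registered with permission 0, so they are not visible or
 * writable through sysfs after the module is loaded.
 */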

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shut down"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	/* Callers must guarantee the free queue is not empty; the list
	   head is dereferenced here without an emptiness check */
	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);

	/* The read back flushes the posted writes so the mask and clear
	   have taken effect on the adapter before we return */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
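
/*
 * Illustrative sketch only: reset and probe paths typically quiesce the
 * adapter with a call like the one below before tearing down state. The
 * mask constant is an assumption based on the IPR_PCII_* naming used
 * elsewhere in this driver, not a verbatim excerpt:
 *
 *	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
 */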

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	/* Make the IOARCB contents visible to the adapter before the
	   doorbell write hands it the request's bus address */
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
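
/*
 * Illustrative sketch only: an internal blocking command is built from a
 * free command block and sent with a timeout handler, e.g. an adapter
 * shutdown. The opcode, request type, and timeout names below are
 * assumptions based on the naming conventions in ipr.h, not a verbatim
 * excerpt from the driver:
 *
 *	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 *	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 *	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 */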

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 1;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;	/* assume the notification is for a new device
			   until a matching resource is found */

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
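
/*
 * Example output of ipr_log_vpd() (illustrative values only; the format
 * strings are taken from the function above, the device data is made up):
 *
 *	Vendor/Product ID: IBM      ST336607LC
 *	    Serial Number: 00C91A23
 */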

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	/* Up to 18 array members are reported, split across the
	   array_member and array_member2 halves of the overlay;
	   entries with an all-zero serial number are skipped */
	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	int i;
	int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);

	if (ioa_data_len == 0)
		return;

	/* Dump the raw data four 32-bit words per line, prefixed with
	   the byte offset of the first word on the line */
	for (i = 0; i < ioa_data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
	}
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}
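
/*
 * Typical use, mirroring ipr_handle_log_data() below: index 0 is the
 * catch-all "unknown error" entry, so the lookup never fails:
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (ipr_error_table[error_index].log_hcam)
 *		dev_err(&ioa_cfg->pdev->dev, "%s\n",
 *			ipr_error_table[error_index].error);
 */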

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 *	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, the adapter
	   is now dead and we must report failure */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 *	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			/* An 'X' in the compare string means the product ID
			   byte at this position must match the table entry;
			   any other character (e.g. '*') is a wildcard */
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
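
/*
 * Worked example: a 16-bit (2-byte) wide bus whose slowest attached
 * enclosure is limited to 160 MB/sec yields
 *
 *	max_xfer_rate = (160 * 10) / (16 / 8) = 800
 *
 * i.e. 80 MHz in the 100KHz units described above.
 */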

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		/* Exponential backoff: 1us, 2us, 4us, ... up to max_delay */
		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		/* Yield the CPU between sections; this can be a long copy */
		schedule();
	}

	return bytes_copied;
}

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}
1654
1655/**
1656 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1657 * @ioa_cfg: ioa config struct
1658 * @dump: dump struct
1659 *
1660 * Return value:
1661 * nothing
1662 **/
1663static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1664{
1665 unsigned long start_addr, sdt_word;
1666 unsigned long lock_flags = 0;
1667 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1668 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1669 u32 num_entries, start_off, end_off;
1670 u32 bytes_to_copy, bytes_copied, rc;
1671 struct ipr_sdt *sdt;
1672 int i;
1673
1674 ENTER;
1675
1676 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1677
1678 if (ioa_cfg->sdt_state != GET_DUMP) {
1679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1680 return;
1681 }
1682
1683 start_addr = readl(ioa_cfg->ioa_mailbox);
1684
1685 if (!ipr_sdt_is_fmt2(start_addr)) {
1686 dev_err(&ioa_cfg->pdev->dev,
1687 "Invalid dump table format: %lx\n", start_addr);
1688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1689 return;
1690 }
1691
1692 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1693
1694 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1695
1696 /* Initialize the overall dump header */
1697 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1698 driver_dump->hdr.num_entries = 1;
1699 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1700 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1701 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1702 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1703
1704 ipr_dump_version_data(ioa_cfg, driver_dump);
1705 ipr_dump_location_data(ioa_cfg, driver_dump);
1706 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1707 ipr_dump_trace_data(ioa_cfg, driver_dump);
1708
1709 /* Update dump_header */
1710 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1711
1712 /* IOA Dump entry */
1713 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1714 ioa_dump->format = IPR_SDT_FMT2;
1715 ioa_dump->hdr.len = 0;
1716 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1717 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1718
1719 /* First entries in sdt are actually a list of dump addresses and
1720 lengths to gather the real dump data. sdt represents the pointer
1721 to the ioa generated dump table. Dump data will be extracted based
1722 on entries in this table */
1723 sdt = &ioa_dump->sdt;
1724
1725 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1726 sizeof(struct ipr_sdt) / sizeof(__be32));
1727
1728 /* Smart Dump table is ready to use and the first entry is valid */
1729 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1730 dev_err(&ioa_cfg->pdev->dev,
1731 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1732 rc, be32_to_cpu(sdt->hdr.state));
1733 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1734 ioa_cfg->sdt_state = DUMP_OBTAINED;
1735 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1736 return;
1737 }
1738
1739 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1740
1741 if (num_entries > IPR_NUM_SDT_ENTRIES)
1742 num_entries = IPR_NUM_SDT_ENTRIES;
1743
1744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1745
1746 for (i = 0; i < num_entries; i++) {
1747 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1748 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1749 break;
1750 }
1751
1752 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1753 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1754 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1755 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1756
1757 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1758 bytes_to_copy = end_off - start_off;
1759 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1760 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1761 continue;
1762 }
1763
1764 /* Copy data from adapter to driver buffers */
1765 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1766 bytes_to_copy);
1767
1768 ioa_dump->hdr.len += bytes_copied;
1769
1770 if (bytes_copied != bytes_to_copy) {
1771 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1772 break;
1773 }
1774 }
1775 }
1776 }
1777
1778 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1779
1780 /* Update dump_header */
1781 driver_dump->hdr.len += ioa_dump->hdr.len;
1782 wmb();
1783 ioa_cfg->sdt_state = DUMP_OBTAINED;
1784 LEAVE;
1785}
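/*
 * Illustrative sketch (not part of the original source; the values are
 * made up): how one valid format 2 SDT entry above becomes a copy
 * request.
 *
 *	sdt_word      = 0x00112000;	(entry's bar_str_offset)
 *	start_off     = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
 *	end_off       = 0x00113000;	(entry's end_offset)
 *	bytes_to_copy = end_off - start_off;	(handed to ipr_sdt_copy)
 */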
1786
1787#else
1788#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1789#endif
1790
1791/**
1792 * ipr_release_dump - Free adapter dump memory
1793 * @kref: kref struct
1794 *
1795 * Return value:
1796 * nothing
1797 **/
1798static void ipr_release_dump(struct kref *kref)
1799{
1800 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
1801 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1802 unsigned long lock_flags = 0;
1803 int i;
1804
1805 ENTER;
1806 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1807 ioa_cfg->dump = NULL;
1808 ioa_cfg->sdt_state = INACTIVE;
1809 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1810
1811 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1812 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1813
1814 kfree(dump);
1815 LEAVE;
1816}
1817
1818/**
1819 * ipr_worker_thread - Worker thread
1820 * @data: ioa config struct
1821 *
1822 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
1824 * changes are detected by the adapter.
1825 *
1826 * Return value:
1827 * nothing
1828 **/
1829static void ipr_worker_thread(void *data)
1830{
1831 unsigned long lock_flags;
1832 struct ipr_resource_entry *res;
1833 struct scsi_device *sdev;
1834 struct ipr_dump *dump;
1835 struct ipr_ioa_cfg *ioa_cfg = data;
1836 u8 bus, target, lun;
1837 int did_work;
1838
1839 ENTER;
1840 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1841
1842 if (ioa_cfg->sdt_state == GET_DUMP) {
1843 dump = ioa_cfg->dump;
1844 if (!dump) {
1845 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1846 return;
1847 }
1848 kref_get(&dump->kref);
1849 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1850 ipr_get_ioa_dump(ioa_cfg, dump);
1851 kref_put(&dump->kref, ipr_release_dump);
1852
1853 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1854 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1855 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1857 return;
1858 }
1859
1860restart:
1861 do {
1862 did_work = 0;
1863 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1864 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1865 return;
1866 }
1867
1868 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1869 if (res->del_from_ml && res->sdev) {
1870 did_work = 1;
1871 sdev = res->sdev;
1872 if (!scsi_device_get(sdev)) {
1873 res->sdev = NULL;
1874 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1875 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1876 scsi_remove_device(sdev);
1877 scsi_device_put(sdev);
1878 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1879 }
1880 break;
1881 }
1882 }
1883 } while(did_work);
1884
1885 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1886 if (res->add_to_ml) {
1887 bus = res->cfgte.res_addr.bus;
1888 target = res->cfgte.res_addr.target;
1889 lun = res->cfgte.res_addr.lun;
1890 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1891 scsi_add_device(ioa_cfg->host, bus, target, lun);
1892 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1893 goto restart;
1894 }
1895 }
1896
1897 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1898 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1899 LEAVE;
1900}
1901
1902#ifdef CONFIG_SCSI_IPR_TRACE
1903/**
1904 * ipr_read_trace - Dump the adapter trace
1905 * @kobj: kobject struct
1906 * @buf: buffer
1907 * @off: offset
1908 * @count: buffer size
1909 *
1910 * Return value:
 * number of bytes read from the trace buffer
1912 **/
1913static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1914 loff_t off, size_t count)
1915{
1916 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1917 struct Scsi_Host *shost = class_to_shost(cdev);
1918 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1919 unsigned long lock_flags = 0;
1920 int size = IPR_TRACE_SIZE;
1921 char *src = (char *)ioa_cfg->trace;
1922
1923 if (off > size)
1924 return 0;
1925 if (off + count > size) {
1926 size -= off;
1927 count = size;
1928 }
1929
1930 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1931 memcpy(buf, &src[off], count);
1932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1933 return count;
1934}
1935
1936static struct bin_attribute ipr_trace_attr = {
1937 .attr = {
1938 .name = "trace",
1939 .mode = S_IRUGO,
1940 },
1941 .size = 0,
1942 .read = ipr_read_trace,
1943};
1944#endif
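/*
 * Illustrative user space sketch (assumption: the bin_attribute above is
 * published as /sys/class/scsi_host/host<N>/trace; "host0" is a
 * placeholder). A read is served by ipr_read_trace() under the host lock:
 *
 *	int fd = open("/sys/class/scsi_host/host0/trace", O_RDONLY);
 *	char buf[512];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	close(fd);
 */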
1945
brking@us.ibm.com62275042005-11-01 17:01:14 -06001946static const struct {
1947 enum ipr_cache_state state;
1948 char *name;
1949} cache_state [] = {
1950 { CACHE_NONE, "none" },
1951 { CACHE_DISABLED, "disabled" },
1952 { CACHE_ENABLED, "enabled" }
1953};
1954
1955/**
1956 * ipr_show_write_caching - Show the write caching attribute
1957 * @class_dev: class device struct
1958 * @buf: buffer
1959 *
1960 * Return value:
1961 * number of bytes printed to buffer
1962 **/
1963static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
1964{
1965 struct Scsi_Host *shost = class_to_shost(class_dev);
1966 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1967 unsigned long lock_flags = 0;
1968 int i, len = 0;
1969
1970 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1971 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
1972 if (cache_state[i].state == ioa_cfg->cache_state) {
1973 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
1974 break;
1975 }
1976 }
1977 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1978 return len;
1979}
1980
1981
1982/**
1983 * ipr_store_write_caching - Enable/disable adapter write cache
1984 * @class_dev: class_device struct
1985 * @buf: buffer
1986 * @count: buffer size
1987 *
1988 * This function will enable/disable adapter write cache.
1989 *
1990 * Return value:
1991 * count on success / other on failure
1992 **/
1993static ssize_t ipr_store_write_caching(struct class_device *class_dev,
1994 const char *buf, size_t count)
1995{
1996 struct Scsi_Host *shost = class_to_shost(class_dev);
1997 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1998 unsigned long lock_flags = 0;
1999 enum ipr_cache_state new_state = CACHE_INVALID;
2000 int i;
2001
2002 if (!capable(CAP_SYS_ADMIN))
2003 return -EACCES;
2004 if (ioa_cfg->cache_state == CACHE_NONE)
2005 return -EINVAL;
2006
2007 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2008 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2009 new_state = cache_state[i].state;
2010 break;
2011 }
2012 }
2013
2014 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2015 return -EINVAL;
2016
2017 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2018 if (ioa_cfg->cache_state == new_state) {
2019 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2020 return count;
2021 }
2022
2023 ioa_cfg->cache_state = new_state;
2024 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2025 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2026 if (!ioa_cfg->in_reset_reload)
2027 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2028 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2029 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2030
2031 return count;
2032}
2033
2034static struct class_device_attribute ipr_ioa_cache_attr = {
2035 .attr = {
2036 .name = "write_cache",
2037 .mode = S_IRUGO | S_IWUSR,
2038 },
2039 .show = ipr_show_write_caching,
2040 .store = ipr_store_write_caching
2041};
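/*
 * Illustrative user space sketch (path layout assumed as for the trace
 * attribute above). Writing one of the cache_state[] names lands in
 * ipr_store_write_caching(), which schedules a normal shutdown and
 * reload of the adapter before the write returns:
 *
 *	int fd = open("/sys/class/scsi_host/host0/write_cache", O_WRONLY);
 *	write(fd, "disabled", strlen("disabled"));
 *	close(fd);
 */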
2042
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043/**
2044 * ipr_show_fw_version - Show the firmware version
2045 * @class_dev: class device struct
2046 * @buf: buffer
2047 *
2048 * Return value:
2049 * number of bytes printed to buffer
2050 **/
2051static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2052{
2053 struct Scsi_Host *shost = class_to_shost(class_dev);
2054 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2055 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2056 unsigned long lock_flags = 0;
2057 int len;
2058
2059 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2060 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2061 ucode_vpd->major_release, ucode_vpd->card_type,
2062 ucode_vpd->minor_release[0],
2063 ucode_vpd->minor_release[1]);
2064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2065 return len;
2066}
2067
2068static struct class_device_attribute ipr_fw_version_attr = {
2069 .attr = {
2070 .name = "fw_version",
2071 .mode = S_IRUGO,
2072 },
2073 .show = ipr_show_fw_version,
2074};
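/*
 * Illustrative note (the value is made up): a fw_version read of
 * "02C8020A" would decode, per ipr_show_fw_version() above, as
 * major_release 0x02, card_type 0xC8 and minor_release bytes 0x02
 * and 0x0A.
 */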
2075
2076/**
2077 * ipr_show_log_level - Show the adapter's error logging level
2078 * @class_dev: class device struct
2079 * @buf: buffer
2080 *
2081 * Return value:
2082 * number of bytes printed to buffer
2083 **/
2084static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2085{
2086 struct Scsi_Host *shost = class_to_shost(class_dev);
2087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2088 unsigned long lock_flags = 0;
2089 int len;
2090
2091 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2092 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2093 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2094 return len;
2095}
2096
2097/**
2098 * ipr_store_log_level - Change the adapter's error logging level
2099 * @class_dev: class device struct
 * @buf: buffer
 * @count: buffer size
2101 *
2102 * Return value:
 * number of bytes consumed from the buffer
2104 **/
2105static ssize_t ipr_store_log_level(struct class_device *class_dev,
2106 const char *buf, size_t count)
2107{
2108 struct Scsi_Host *shost = class_to_shost(class_dev);
2109 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2110 unsigned long lock_flags = 0;
2111
2112 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2113 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2114 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2115 return strlen(buf);
2116}
2117
2118static struct class_device_attribute ipr_log_level_attr = {
2119 .attr = {
2120 .name = "log_level",
2121 .mode = S_IRUGO | S_IWUSR,
2122 },
2123 .show = ipr_show_log_level,
2124 .store = ipr_store_log_level
2125};
2126
2127/**
2128 * ipr_store_diagnostics - IOA Diagnostics interface
2129 * @class_dev: class_device struct
2130 * @buf: buffer
2131 * @count: buffer size
2132 *
2133 * This function will reset the adapter and wait a reasonable
2134 * amount of time for any errors that the adapter might log.
2135 *
2136 * Return value:
2137 * count on success / other on failure
2138 **/
2139static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2140 const char *buf, size_t count)
2141{
2142 struct Scsi_Host *shost = class_to_shost(class_dev);
2143 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2144 unsigned long lock_flags = 0;
2145 int rc = count;
2146
2147 if (!capable(CAP_SYS_ADMIN))
2148 return -EACCES;
2149
2150 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2151 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2152 ioa_cfg->errors_logged = 0;
2153 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2154
2155 if (ioa_cfg->in_reset_reload) {
2156 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2157 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2158
2159 /* Wait for a second for any errors to be logged */
2160 msleep(1000);
2161 } else {
2162 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2163 return -EIO;
2164 }
2165
2166 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2167 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2168 rc = -EIO;
2169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2170
2171 return rc;
2172}
2173
2174static struct class_device_attribute ipr_diagnostics_attr = {
2175 .attr = {
2176 .name = "run_diagnostics",
2177 .mode = S_IWUSR,
2178 },
2179 .store = ipr_store_diagnostics
2180};
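/*
 * Illustrative user space sketch (paths assumed as above): any write to
 * run_diagnostics resets the adapter through ipr_store_diagnostics(), and
 * the write fails with EIO if errors were logged during the reload:
 *
 *	int fd = open("/sys/class/scsi_host/host0/run_diagnostics", O_WRONLY);
 *	if (write(fd, "1", 1) < 0)
 *		perror("run_diagnostics");	(EIO: adapter logged errors)
 *	close(fd);
 */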
2181
2182/**
2183 * ipr_store_reset_adapter - Reset the adapter
2184 * @class_dev: class_device struct
2185 * @buf: buffer
2186 * @count: buffer size
2187 *
2188 * This function will reset the adapter.
2189 *
2190 * Return value:
2191 * count on success / other on failure
2192 **/
2193static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2194 const char *buf, size_t count)
2195{
2196 struct Scsi_Host *shost = class_to_shost(class_dev);
2197 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2198 unsigned long lock_flags;
2199 int result = count;
2200
2201 if (!capable(CAP_SYS_ADMIN))
2202 return -EACCES;
2203
2204 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2205 if (!ioa_cfg->in_reset_reload)
2206 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2207 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2208 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2209
2210 return result;
2211}
2212
2213static struct class_device_attribute ipr_ioa_reset_attr = {
2214 .attr = {
2215 .name = "reset_host",
2216 .mode = S_IWUSR,
2217 },
2218 .store = ipr_store_reset_adapter
2219};
2220
2221/**
2222 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2223 * @buf_len: buffer length
2224 *
2225 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2226 * list to use for microcode download
2227 *
2228 * Return value:
2229 * pointer to sglist / NULL on failure
2230 **/
2231static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2232{
2233 int sg_size, order, bsize_elem, num_elem, i, j;
2234 struct ipr_sglist *sglist;
2235 struct scatterlist *scatterlist;
2236 struct page *page;
2237
2238 /* Get the minimum size per scatter/gather element */
2239 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2240
2241 /* Get the actual size per element */
2242 order = get_order(sg_size);
2243
2244 /* Determine the actual number of bytes per element */
2245 bsize_elem = PAGE_SIZE * (1 << order);
2246
2247 /* Determine the actual number of sg entries needed */
2248 if (buf_len % bsize_elem)
2249 num_elem = (buf_len / bsize_elem) + 1;
2250 else
2251 num_elem = buf_len / bsize_elem;
2252
2253 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06002254 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 (sizeof(struct scatterlist) * (num_elem - 1)),
2256 GFP_KERNEL);
2257
2258 if (sglist == NULL) {
2259 ipr_trace;
2260 return NULL;
2261 }
2262
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 scatterlist = sglist->scatterlist;
2264
2265 sglist->order = order;
2266 sglist->num_sg = num_elem;
2267
2268 /* Allocate a bunch of sg elements */
2269 for (i = 0; i < num_elem; i++) {
2270 page = alloc_pages(GFP_KERNEL, order);
2271 if (!page) {
2272 ipr_trace;
2273
2274 /* Free up what we already allocated */
2275 for (j = i - 1; j >= 0; j--)
2276 __free_pages(scatterlist[j].page, order);
2277 kfree(sglist);
2278 return NULL;
2279 }
2280
2281 scatterlist[i].page = page;
2282 }
2283
2284 return sglist;
2285}
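/*
 * Worked example (numbers made up; assumes 4K pages and an IPR_MAX_SGLIST
 * of 64): for a 600K microcode image, sg_size = 600K / 63 is roughly
 * 9.5K, so get_order() picks order 2, i.e. 16K chunks. bsize_elem is then
 * 16K and num_elem = 38: 37 full chunks plus one partial element.
 */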
2286
2287/**
2288 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist: scatter/gather list pointer
2290 *
2291 * Free a DMA'able ucode download buffer previously allocated with
2292 * ipr_alloc_ucode_buffer
2293 *
2294 * Return value:
2295 * nothing
2296 **/
2297static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2298{
2299 int i;
2300
2301 for (i = 0; i < sglist->num_sg; i++)
2302 __free_pages(sglist->scatterlist[i].page, sglist->order);
2303
2304 kfree(sglist);
2305}
2306
2307/**
2308 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2309 * @sglist: scatter/gather list pointer
2310 * @buffer: buffer pointer
2311 * @len: buffer length
2312 *
2313 * Copy a microcode image from a user buffer into a buffer allocated by
2314 * ipr_alloc_ucode_buffer
2315 *
2316 * Return value:
2317 * 0 on success / other on failure
2318 **/
2319static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2320 u8 *buffer, u32 len)
2321{
2322 int bsize_elem, i, result = 0;
2323 struct scatterlist *scatterlist;
2324 void *kaddr;
2325
2326 /* Determine the actual number of bytes per element */
2327 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2328
2329 scatterlist = sglist->scatterlist;
2330
2331 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2332 kaddr = kmap(scatterlist[i].page);
2333 memcpy(kaddr, buffer, bsize_elem);
2334 kunmap(scatterlist[i].page);
2335
2336 scatterlist[i].length = bsize_elem;
2337
2338 if (result != 0) {
2339 ipr_trace;
2340 return result;
2341 }
2342 }
2343
2344 if (len % bsize_elem) {
2345 kaddr = kmap(scatterlist[i].page);
2346 memcpy(kaddr, buffer, len % bsize_elem);
2347 kunmap(scatterlist[i].page);
2348
2349 scatterlist[i].length = len % bsize_elem;
2350 }
2351
2352 sglist->buffer_len = len;
2353 return result;
2354}
2355
2356/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002357 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 * @ipr_cmd: ipr command struct
2359 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002361 * Builds a microcode download IOA data list (IOADL).
 *
 * Return value:
 * nothing
 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002364static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2365 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2368 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2369 struct scatterlist *scatterlist = sglist->scatterlist;
2370 int i;
2371
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002372 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002374 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 ioarcb->write_ioadl_len =
2376 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2377
2378 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2379 ioadl[i].flags_and_data_len =
2380 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2381 ioadl[i].address =
2382 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2383 }
2384
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002385 ioadl[i-1].flags_and_data_len |=
2386 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2387}
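/*
 * Illustrative sketch (values made up): each descriptor built above packs
 * the flags and length into one big endian word plus a bus address. A 16K
 * chunk mapped at 0x01f40000 would become:
 *
 *	ioadl[i].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE | 0x4000);
 *	ioadl[i].address = cpu_to_be32(0x01f40000);
 *
 * with IPR_IOADL_FLAGS_LAST OR'ed into the final descriptor only.
 */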
2388
2389/**
2390 * ipr_update_ioa_ucode - Update IOA's microcode
2391 * @ioa_cfg: ioa config struct
2392 * @sglist: scatter/gather list
2393 *
2394 * Initiate an adapter reset to update the IOA's microcode
2395 *
2396 * Return value:
2397 * 0 on success / -EIO on failure
2398 **/
2399static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2400 struct ipr_sglist *sglist)
2401{
2402 unsigned long lock_flags;
2403
2404 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2405
2406 if (ioa_cfg->ucode_sglist) {
2407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2408 dev_err(&ioa_cfg->pdev->dev,
2409 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 return -EIO;
2411 }
2412
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002413 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2414 sglist->num_sg, DMA_TO_DEVICE);
2415
2416 if (!sglist->num_dma_sg) {
2417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2418 dev_err(&ioa_cfg->pdev->dev,
2419 "Failed to map microcode download buffer!\n");
2420 return -EIO;
2421 }
2422
2423 ioa_cfg->ucode_sglist = sglist;
2424 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2426 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2427
2428 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2429 ioa_cfg->ucode_sglist = NULL;
2430 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 return 0;
2432}
2433
2434/**
2435 * ipr_store_update_fw - Update the firmware on the adapter
2436 * @class_dev: class_device struct
2437 * @buf: buffer
2438 * @count: buffer size
2439 *
2440 * This function will update the firmware on the adapter.
2441 *
2442 * Return value:
2443 * count on success / other on failure
2444 **/
2445static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2446 const char *buf, size_t count)
2447{
2448 struct Scsi_Host *shost = class_to_shost(class_dev);
2449 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2450 struct ipr_ucode_image_header *image_hdr;
2451 const struct firmware *fw_entry;
2452 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 char fname[100];
2454 char *src;
2455 int len, result, dnld_size;
2456
2457 if (!capable(CAP_SYS_ADMIN))
2458 return -EACCES;
2459
2460 len = snprintf(fname, 99, "%s", buf);
2461 fname[len-1] = '\0';
2462
	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2464 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2465 return -EIO;
2466 }
2467
2468 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2469
2470 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2471 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2472 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2473 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2474 release_firmware(fw_entry);
2475 return -EINVAL;
2476 }
2477
2478 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2479 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2480 sglist = ipr_alloc_ucode_buffer(dnld_size);
2481
2482 if (!sglist) {
2483 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2484 release_firmware(fw_entry);
2485 return -ENOMEM;
2486 }
2487
2488 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2489
2490 if (result) {
2491 dev_err(&ioa_cfg->pdev->dev,
2492 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002493 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 }
2495
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002496 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002498 if (!result)
2499 result = count;
2500out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501 ipr_free_ucode_buffer(sglist);
2502 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002503 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504}
2505
2506static struct class_device_attribute ipr_update_fw_attr = {
2507 .attr = {
2508 .name = "update_fw",
2509 .mode = S_IWUSR,
2510 },
2511 .store = ipr_store_update_fw
2512};
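/*
 * Illustrative user space sketch (the image name is a placeholder; the
 * file must be somewhere request_firmware() can find it, typically
 * /lib/firmware):
 *
 *	int fd = open("/sys/class/scsi_host/host0/update_fw", O_WRONLY);
 *	write(fd, "ibm-ipr-ucode.bin", strlen("ibm-ipr-ucode.bin"));
 *	close(fd);
 */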
2513
2514static struct class_device_attribute *ipr_ioa_attrs[] = {
2515 &ipr_fw_version_attr,
2516 &ipr_log_level_attr,
2517 &ipr_diagnostics_attr,
2518 &ipr_ioa_reset_attr,
2519 &ipr_update_fw_attr,
brking@us.ibm.com62275042005-11-01 17:01:14 -06002520 &ipr_ioa_cache_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 NULL,
2522};
2523
2524#ifdef CONFIG_SCSI_IPR_DUMP
2525/**
2526 * ipr_read_dump - Dump the adapter
2527 * @kobj: kobject struct
2528 * @buf: buffer
2529 * @off: offset
2530 * @count: buffer size
2531 *
2532 * Return value:
 * number of bytes read from the dump
2534 **/
2535static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2536 loff_t off, size_t count)
2537{
2538 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2539 struct Scsi_Host *shost = class_to_shost(cdev);
2540 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2541 struct ipr_dump *dump;
2542 unsigned long lock_flags = 0;
2543 char *src;
2544 int len;
2545 size_t rc = count;
2546
2547 if (!capable(CAP_SYS_ADMIN))
2548 return -EACCES;
2549
2550 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2551 dump = ioa_cfg->dump;
2552
2553 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2554 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2555 return 0;
2556 }
2557 kref_get(&dump->kref);
2558 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2559
2560 if (off > dump->driver_dump.hdr.len) {
2561 kref_put(&dump->kref, ipr_release_dump);
2562 return 0;
2563 }
2564
2565 if (off + count > dump->driver_dump.hdr.len) {
2566 count = dump->driver_dump.hdr.len - off;
2567 rc = count;
2568 }
2569
2570 if (count && off < sizeof(dump->driver_dump)) {
2571 if (off + count > sizeof(dump->driver_dump))
2572 len = sizeof(dump->driver_dump) - off;
2573 else
2574 len = count;
2575 src = (u8 *)&dump->driver_dump + off;
2576 memcpy(buf, src, len);
2577 buf += len;
2578 off += len;
2579 count -= len;
2580 }
2581
2582 off -= sizeof(dump->driver_dump);
2583
2584 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2585 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2586 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2587 else
2588 len = count;
2589 src = (u8 *)&dump->ioa_dump + off;
2590 memcpy(buf, src, len);
2591 buf += len;
2592 off += len;
2593 count -= len;
2594 }
2595
2596 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2597
2598 while (count) {
2599 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2600 len = PAGE_ALIGN(off) - off;
2601 else
2602 len = count;
2603 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2604 src += off & ~PAGE_MASK;
2605 memcpy(buf, src, len);
2606 buf += len;
2607 off += len;
2608 count -= len;
2609 }
2610
2611 kref_put(&dump->kref, ipr_release_dump);
2612 return rc;
2613}
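/*
 * Worked example (sizes made up): with a 4096 byte driver_dump and a 256
 * byte ioa_dump header, a read at off = 4000 for count = 512 copies the
 * last 96 bytes of driver_dump, then all 256 header bytes, then 160 bytes
 * from ioa_data[0]; each stage above advances buf and off and shrinks
 * count accordingly.
 */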
2614
2615/**
2616 * ipr_alloc_dump - Prepare for adapter dump
2617 * @ioa_cfg: ioa config struct
2618 *
2619 * Return value:
2620 * 0 on success / other on failure
2621 **/
2622static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2623{
2624 struct ipr_dump *dump;
2625 unsigned long lock_flags = 0;
2626
2627 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06002628 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629
2630 if (!dump) {
2631 ipr_err("Dump memory allocation failed\n");
2632 return -ENOMEM;
2633 }
2634
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635 kref_init(&dump->kref);
2636 dump->ioa_cfg = ioa_cfg;
2637
2638 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2639
2640 if (INACTIVE != ioa_cfg->sdt_state) {
2641 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2642 kfree(dump);
2643 return 0;
2644 }
2645
2646 ioa_cfg->dump = dump;
2647 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2648 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2649 ioa_cfg->dump_taken = 1;
2650 schedule_work(&ioa_cfg->work_q);
2651 }
2652 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2653
2654 LEAVE;
2655 return 0;
2656}
2657
2658/**
2659 * ipr_free_dump - Free adapter dump memory
2660 * @ioa_cfg: ioa config struct
2661 *
2662 * Return value:
2663 * 0 on success / other on failure
2664 **/
2665static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2666{
2667 struct ipr_dump *dump;
2668 unsigned long lock_flags = 0;
2669
2670 ENTER;
2671
2672 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2673 dump = ioa_cfg->dump;
2674 if (!dump) {
2675 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2676 return 0;
2677 }
2678
2679 ioa_cfg->dump = NULL;
2680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2681
2682 kref_put(&dump->kref, ipr_release_dump);
2683
2684 LEAVE;
2685 return 0;
2686}
2687
2688/**
2689 * ipr_write_dump - Setup dump state of adapter
2690 * @kobj: kobject struct
2691 * @buf: buffer
2692 * @off: offset
2693 * @count: buffer size
2694 *
2695 * Return value:
 * count on success / other on failure
2697 **/
2698static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2699 loff_t off, size_t count)
2700{
2701 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2702 struct Scsi_Host *shost = class_to_shost(cdev);
2703 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2704 int rc;
2705
2706 if (!capable(CAP_SYS_ADMIN))
2707 return -EACCES;
2708
2709 if (buf[0] == '1')
2710 rc = ipr_alloc_dump(ioa_cfg);
2711 else if (buf[0] == '0')
2712 rc = ipr_free_dump(ioa_cfg);
2713 else
2714 return -EINVAL;
2715
2716 if (rc)
2717 return rc;
2718 else
2719 return count;
2720}
2721
2722static struct bin_attribute ipr_dump_attr = {
2723 .attr = {
2724 .name = "dump",
2725 .mode = S_IRUSR | S_IWUSR,
2726 },
2727 .size = 0,
2728 .read = ipr_read_dump,
2729 .write = ipr_write_dump
2730};
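/*
 * Illustrative usage sketch (path assumed as above; fd is an open file
 * descriptor for the dump attribute): writing '1' arms dump collection
 * via ipr_alloc_dump(), writing '0' releases it via ipr_free_dump(), and
 * reads stream the collected dump through ipr_read_dump():
 *
 *	write(fd, "1", 1);	(prepare: ipr_alloc_dump)
 *	write(fd, "0", 1);	(release: ipr_free_dump)
 */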
2731#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
2733#endif
2734
2735/**
2736 * ipr_change_queue_depth - Change the device's queue depth
2737 * @sdev: scsi device struct
2738 * @qdepth: depth to set
2739 *
2740 * Return value:
2741 * actual depth set
2742 **/
2743static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
2744{
2745 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2746 return sdev->queue_depth;
2747}
2748
2749/**
2750 * ipr_change_queue_type - Change the device's queue type
 * @sdev: scsi device struct
2752 * @tag_type: type of tags to use
2753 *
2754 * Return value:
2755 * actual queue type set
2756 **/
2757static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2758{
2759 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2760 struct ipr_resource_entry *res;
2761 unsigned long lock_flags = 0;
2762
2763 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2764 res = (struct ipr_resource_entry *)sdev->hostdata;
2765
2766 if (res) {
2767 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2768 /*
2769 * We don't bother quiescing the device here since the
2770 * adapter firmware does it for us.
2771 */
2772 scsi_set_tag_type(sdev, tag_type);
2773
2774 if (tag_type)
2775 scsi_activate_tcq(sdev, sdev->queue_depth);
2776 else
2777 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2778 } else
2779 tag_type = 0;
2780 } else
2781 tag_type = 0;
2782
2783 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2784 return tag_type;
2785}
2786
2787/**
2788 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2789 * @dev: device struct
 * @attr: device attribute struct
 * @buf: buffer
2791 *
2792 * Return value:
2793 * number of bytes printed to buffer
2794 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04002795static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796{
2797 struct scsi_device *sdev = to_scsi_device(dev);
2798 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2799 struct ipr_resource_entry *res;
2800 unsigned long lock_flags = 0;
2801 ssize_t len = -ENXIO;
2802
2803 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2804 res = (struct ipr_resource_entry *)sdev->hostdata;
2805 if (res)
2806 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2807 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2808 return len;
2809}
2810
2811static struct device_attribute ipr_adapter_handle_attr = {
2812 .attr = {
2813 .name = "adapter_handle",
2814 .mode = S_IRUSR,
2815 },
2816 .show = ipr_show_adapter_handle
2817};
2818
2819static struct device_attribute *ipr_dev_attrs[] = {
2820 &ipr_adapter_handle_attr,
2821 NULL,
2822};
2823
2824/**
2825 * ipr_biosparam - Return the HSC mapping
2826 * @sdev: scsi device struct
2827 * @block_device: block device pointer
2828 * @capacity: capacity of the device
2829 * @parm: Array containing returned HSC values.
2830 *
2831 * This function generates the HSC parms that fdisk uses.
2832 * We want to make sure we return something that places partitions
2833 * on 4k boundaries for best performance with the IOA.
2834 *
2835 * Return value:
2836 * 0 on success
2837 **/
2838static int ipr_biosparam(struct scsi_device *sdev,
2839 struct block_device *block_device,
2840 sector_t capacity, int *parm)
2841{
2842 int heads, sectors;
2843 sector_t cylinders;
2844
2845 heads = 128;
2846 sectors = 32;
2847
2848 cylinders = capacity;
2849 sector_div(cylinders, (128 * 32));
2850
2851 /* return result */
2852 parm[0] = heads;
2853 parm[1] = sectors;
2854 parm[2] = cylinders;
2855
2856 return 0;
2857}
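/*
 * Worked example (the capacity is made up): a disk of 139,000,000 sectors
 * reports 128 heads, 32 sectors and 139000000 / (128 * 32) = 33935
 * cylinders. Every cylinder then spans 4096 sectors (2MB with 512 byte
 * sectors), so fdisk's cylinder-aligned partitions start on 4k
 * boundaries, as intended above.
 */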
2858
2859/**
2860 * ipr_slave_destroy - Unconfigure a SCSI device
2861 * @sdev: scsi device struct
2862 *
2863 * Return value:
2864 * nothing
2865 **/
2866static void ipr_slave_destroy(struct scsi_device *sdev)
2867{
2868 struct ipr_resource_entry *res;
2869 struct ipr_ioa_cfg *ioa_cfg;
2870 unsigned long lock_flags = 0;
2871
2872 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2873
2874 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2875 res = (struct ipr_resource_entry *) sdev->hostdata;
2876 if (res) {
2877 sdev->hostdata = NULL;
2878 res->sdev = NULL;
2879 }
2880 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2881}
2882
2883/**
2884 * ipr_slave_configure - Configure a SCSI device
2885 * @sdev: scsi device struct
2886 *
2887 * This function configures the specified scsi device.
2888 *
2889 * Return value:
2890 * 0 on success
2891 **/
2892static int ipr_slave_configure(struct scsi_device *sdev)
2893{
2894 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2895 struct ipr_resource_entry *res;
2896 unsigned long lock_flags = 0;
2897
2898 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2899 res = sdev->hostdata;
2900 if (res) {
2901 if (ipr_is_af_dasd_device(res))
2902 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06002903 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06002905 sdev->no_uld_attach = 1;
2906 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907 if (ipr_is_vset_device(res)) {
2908 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2909 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
2910 }
2911 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2912 sdev->allow_restart = 1;
2913 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
2914 }
2915 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2916 return 0;
2917}
2918
2919/**
2920 * ipr_slave_alloc - Prepare for commands to a device.
2921 * @sdev: scsi device struct
2922 *
2923 * This function saves a pointer to the resource entry
2924 * in the scsi device struct if the device exists. We
2925 * can then use this pointer in ipr_queuecommand when
2926 * handling new commands.
2927 *
2928 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06002929 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 **/
2931static int ipr_slave_alloc(struct scsi_device *sdev)
2932{
2933 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2934 struct ipr_resource_entry *res;
2935 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06002936 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937
2938 sdev->hostdata = NULL;
2939
2940 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2941
2942 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2943 if ((res->cfgte.res_addr.bus == sdev->channel) &&
2944 (res->cfgte.res_addr.target == sdev->id) &&
2945 (res->cfgte.res_addr.lun == sdev->lun)) {
2946 res->sdev = sdev;
2947 res->add_to_ml = 0;
2948 res->in_erp = 0;
2949 sdev->hostdata = res;
2950 res->needs_sync_complete = 1;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06002951 rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 break;
2953 }
2954 }
2955
2956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2957
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06002958 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959}
2960
2961/**
2962 * ipr_eh_host_reset - Reset the host adapter
2963 * @scsi_cmd: scsi command struct
2964 *
2965 * Return value:
2966 * SUCCESS / FAILED
2967 **/
Jeff Garzik df0ae242005-05-28 07:57:14 -04002968static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969{
2970 struct ipr_ioa_cfg *ioa_cfg;
2971 int rc;
2972
2973 ENTER;
2974 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2975
2976 dev_err(&ioa_cfg->pdev->dev,
2977 "Adapter being reset as a result of error recovery.\n");
2978
2979 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2980 ioa_cfg->sdt_state = GET_DUMP;
2981
2982 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2983
2984 LEAVE;
2985 return rc;
2986}
2987
Jeff Garzik df0ae242005-05-28 07:57:14 -04002988static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
2989{
2990 int rc;
2991
2992 spin_lock_irq(cmd->device->host->host_lock);
2993 rc = __ipr_eh_host_reset(cmd);
2994 spin_unlock_irq(cmd->device->host->host_lock);
2995
2996 return rc;
2997}
2998
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999/**
3000 * ipr_eh_dev_reset - Reset the device
3001 * @scsi_cmd: scsi command struct
3002 *
3003 * This function issues a device reset to the affected device.
3004 * A LUN reset will be sent to the device first. If that does
3005 * not work, a target reset will be sent.
3006 *
3007 * Return value:
3008 * SUCCESS / FAILED
3009 **/
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04003010static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011{
3012 struct ipr_cmnd *ipr_cmd;
3013 struct ipr_ioa_cfg *ioa_cfg;
3014 struct ipr_resource_entry *res;
3015 struct ipr_cmd_pkt *cmd_pkt;
3016 u32 ioasc;
3017
3018 ENTER;
3019 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3020 res = scsi_cmd->device->hostdata;
3021
3022 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3023 return FAILED;
3024
3025 /*
3026 * If we are currently going through reset/reload, return failed. This will force the
3027 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3028 * reset to complete
3029 */
3030 if (ioa_cfg->in_reset_reload)
3031 return FAILED;
3032 if (ioa_cfg->ioa_is_dead)
3033 return FAILED;
3034
3035 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3036 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3037 if (ipr_cmd->scsi_cmd)
3038 ipr_cmd->done = ipr_scsi_eh_done;
3039 }
3040 }
3041
3042 res->resetting_device = 1;
3043
3044 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3045
3046 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3047 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3048 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3049 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3050
3051 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3052 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3053
3054 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3055
3056 res->resetting_device = 0;
3057
3058 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3059
3060 LEAVE;
3061 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3062}
3063
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04003064static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3065{
3066 int rc;
3067
3068 spin_lock_irq(cmd->device->host->host_lock);
3069 rc = __ipr_eh_dev_reset(cmd);
3070 spin_unlock_irq(cmd->device->host->host_lock);
3071
3072 return rc;
3073}
3074
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075/**
3076 * ipr_bus_reset_done - Op done function for bus reset.
3077 * @ipr_cmd: ipr command struct
3078 *
3079 * This function is the op done function for a bus reset
3080 *
3081 * Return value:
3082 * none
3083 **/
3084static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3085{
3086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3087 struct ipr_resource_entry *res;
3088
3089 ENTER;
3090 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3091 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3092 sizeof(res->cfgte.res_handle))) {
3093 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3094 break;
3095 }
3096 }
3097
3098 /*
3099 * If abort has not completed, indicate the reset has, else call the
3100 * abort's done function to wake the sleeping eh thread
3101 */
3102 if (ipr_cmd->sibling->sibling)
3103 ipr_cmd->sibling->sibling = NULL;
3104 else
3105 ipr_cmd->sibling->done(ipr_cmd->sibling);
3106
3107 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3108 LEAVE;
3109}
3110
3111/**
3112 * ipr_abort_timeout - An abort task has timed out
3113 * @ipr_cmd: ipr command struct
3114 *
3115 * This function handles when an abort task times out. If this
3116 * happens we issue a bus reset since we have resources tied
3117 * up that must be freed before returning to the midlayer.
3118 *
3119 * Return value:
3120 * none
3121 **/
3122static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3123{
3124 struct ipr_cmnd *reset_cmd;
3125 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3126 struct ipr_cmd_pkt *cmd_pkt;
3127 unsigned long lock_flags = 0;
3128
3129 ENTER;
3130 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3131 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3133 return;
3134 }
3135
3136 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3137 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3138 ipr_cmd->sibling = reset_cmd;
3139 reset_cmd->sibling = ipr_cmd;
3140 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3141 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3142 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3143 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3144 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3145
3146 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3147 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148 LEAVE;
3149}
3150
3151/**
3152 * ipr_cancel_op - Cancel specified op
3153 * @scsi_cmd: scsi command struct
3154 *
3155 * This function cancels specified op.
3156 *
3157 * Return value:
3158 * SUCCESS / FAILED
3159 **/
3160static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3161{
3162 struct ipr_cmnd *ipr_cmd;
3163 struct ipr_ioa_cfg *ioa_cfg;
3164 struct ipr_resource_entry *res;
3165 struct ipr_cmd_pkt *cmd_pkt;
3166 u32 ioasc;
3167 int op_found = 0;
3168
3169 ENTER;
3170 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3171 res = scsi_cmd->device->hostdata;
3172
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04003173 /* If we are currently going through reset/reload, return failed.
3174 * This will force the mid-layer to call ipr_eh_host_reset,
3175 * which will then go to sleep and wait for the reset to complete
3176 */
3177 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3178 return FAILED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3180 return FAILED;
3181
3182 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3183 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3184 ipr_cmd->done = ipr_scsi_eh_done;
3185 op_found = 1;
3186 break;
3187 }
3188 }
3189
3190 if (!op_found)
3191 return SUCCESS;
3192
3193 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3194 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3195 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3196 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3197 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3198 ipr_cmd->u.sdev = scsi_cmd->device;
3199
3200 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3201 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3202 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3203
3204 /*
3205 * If the abort task timed out and we sent a bus reset, we will get
 * one of the following responses to the abort
3207 */
3208 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3209 ioasc = 0;
3210 ipr_trace;
3211 }
3212
3213 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3214 res->needs_sync_complete = 1;
3215
3216 LEAVE;
3217 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3218}
3219
3220/**
3221 * ipr_eh_abort - Abort a single op
3222 * @scsi_cmd: scsi command struct
3223 *
3224 * Return value:
3225 * SUCCESS / FAILED
3226 **/
3227static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3228{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04003229 unsigned long flags;
3230 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231
3232 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04003234 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3235 rc = ipr_cancel_op(scsi_cmd);
3236 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237
3238 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04003239 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240}
3241
3242/**
3243 * ipr_handle_other_interrupt - Handle "other" interrupts
3244 * @ioa_cfg: ioa config struct
3245 * @int_reg: interrupt register
3246 *
3247 * Return value:
3248 * IRQ_NONE / IRQ_HANDLED
3249 **/
3250static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3251 volatile u32 int_reg)
3252{
3253 irqreturn_t rc = IRQ_HANDLED;
3254
3255 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3256 /* Mask the interrupt */
3257 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3258
3259 /* Clear the interrupt */
3260 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3261 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3262
3263 list_del(&ioa_cfg->reset_cmd->queue);
3264 del_timer(&ioa_cfg->reset_cmd->timer);
3265 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3266 } else {
3267 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3268 ioa_cfg->ioa_unit_checked = 1;
3269 else
3270 dev_err(&ioa_cfg->pdev->dev,
3271 "Permanent IOA failure. 0x%08X\n", int_reg);
3272
3273 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3274 ioa_cfg->sdt_state = GET_DUMP;
3275
3276 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3277 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3278 }
3279
3280 return rc;
3281}
3282
3283/**
3284 * ipr_isr - Interrupt service routine
3285 * @irq: irq number
3286 * @devp: pointer to ioa config struct
3287 * @regs: pt_regs struct
3288 *
3289 * Return value:
3290 * IRQ_NONE / IRQ_HANDLED
3291 **/
3292static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3293{
3294 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3295 unsigned long lock_flags = 0;
3296 volatile u32 int_reg, int_mask_reg;
3297 u32 ioasc;
3298 u16 cmd_index;
3299 struct ipr_cmnd *ipr_cmd;
3300 irqreturn_t rc = IRQ_NONE;
3301
3302 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3303
3304 /* If interrupts are disabled, ignore the interrupt */
3305 if (!ioa_cfg->allow_interrupts) {
3306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3307 return IRQ_NONE;
3308 }
3309
3310 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3311 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3312
3313 /* If an interrupt on the adapter did not occur, ignore it */
3314 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3315 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3316 return IRQ_NONE;
3317 }
3318
3319 while (1) {
3320 ipr_cmd = NULL;
3321
3322 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3323 ioa_cfg->toggle_bit) {
3324
3325 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3326 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3327
3328 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3329 ioa_cfg->errors_logged++;
3330 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3331
3332 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3333 ioa_cfg->sdt_state = GET_DUMP;
3334
3335 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3336 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3337 return IRQ_HANDLED;
3338 }
3339
3340 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3341
3342 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3343
3344 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3345
3346 list_del(&ipr_cmd->queue);
3347 del_timer(&ipr_cmd->timer);
3348 ipr_cmd->done(ipr_cmd);
3349
3350 rc = IRQ_HANDLED;
3351
3352 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3353 ioa_cfg->hrrq_curr++;
3354 } else {
3355 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3356 ioa_cfg->toggle_bit ^= 1u;
3357 }
3358 }
3359
3360 if (ipr_cmd != NULL) {
3361 /* Clear the PCI interrupt */
3362 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3363 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3364 } else
3365 break;
3366 }
3367
3368 if (unlikely(rc == IRQ_NONE))
3369 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3370
3371 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3372 return rc;
3373}
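/*
 * Illustrative sketch (the response word is made up and the mask/shift
 * values of 0xfffffffc and 2 are assumptions about ipr.h): decoding one
 * host RRQ word as the loop above does. With *hrrq_curr holding
 * 0x00000013 and toggle_bit = 1, the low bit matches IPR_HRRQ_TOGGLE_BIT
 * and
 *
 *	cmd_index = (0x00000013 & IPR_HRRQ_REQ_RESP_HANDLE_MASK)
 *			>> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 *
 * would yield 4, selecting ipr_cmnd_list[4] for completion.
 */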
3374
3375/**
3376 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3377 * @ioa_cfg: ioa config struct
3378 * @ipr_cmd: ipr command struct
3379 *
3380 * Return value:
3381 * 0 on success / -1 on failure
3382 **/
3383static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3384 struct ipr_cmnd *ipr_cmd)
3385{
3386 int i;
3387 struct scatterlist *sglist;
3388 u32 length;
3389 u32 ioadl_flags = 0;
3390 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3391 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3392 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3393
3394 length = scsi_cmd->request_bufflen;
3395
3396 if (length == 0)
3397 return 0;
3398
3399 if (scsi_cmd->use_sg) {
3400 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3401 scsi_cmd->request_buffer,
3402 scsi_cmd->use_sg,
3403 scsi_cmd->sc_data_direction);
3404
3405 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3406 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3407 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3408 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3409 ioarcb->write_ioadl_len =
3410 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3411 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3412 ioadl_flags = IPR_IOADL_FLAGS_READ;
3413 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3414 ioarcb->read_ioadl_len =
3415 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3416 }
3417
3418 sglist = scsi_cmd->request_buffer;
3419
3420 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3421 ioadl[i].flags_and_data_len =
3422 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3423 ioadl[i].address =
3424 cpu_to_be32(sg_dma_address(&sglist[i]));
3425 }
3426
3427 if (likely(ipr_cmd->dma_use_sg)) {
3428 ioadl[i-1].flags_and_data_len |=
3429 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3430 return 0;
3431 } else
3432 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3433 } else {
3434 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3435 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3436 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3437 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3438 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3439 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3440 ioadl_flags = IPR_IOADL_FLAGS_READ;
3441 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3442 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3443 }
3444
3445 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3446 scsi_cmd->request_buffer, length,
3447 scsi_cmd->sc_data_direction);
3448
3449 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3450 ipr_cmd->dma_use_sg = 1;
3451 ioadl[0].flags_and_data_len =
3452 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3453 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3454 return 0;
3455 } else
3456 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3457 }
3458
3459 return -1;
3460}
3461
3462/**
3463 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3464 * @scsi_cmd: scsi command struct
3465 *
3466 * Return value:
3467 * task attributes
3468 **/
3469static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3470{
3471 u8 tag[2];
3472 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3473
3474 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3475 switch (tag[0]) {
3476 case MSG_SIMPLE_TAG:
3477 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3478 break;
3479 case MSG_HEAD_TAG:
3480 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3481 break;
3482 case MSG_ORDERED_TAG:
3483 rc = IPR_FLAGS_LO_ORDERED_TASK;
3484 break;
		}
3486 }
3487
3488 return rc;
3489}
3490
3491/**
3492 * ipr_erp_done - Process completion of ERP for a device
3493 * @ipr_cmd: ipr command struct
3494 *
3495 * This function copies the sense buffer into the scsi_cmd
3496 * struct and pushes the scsi_done function.
3497 *
3498 * Return value:
3499 * nothing
3500 **/
3501static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3502{
3503 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3504 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3505 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3506 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3507
3508 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3509 scsi_cmd->result |= (DID_ERROR << 16);
3510 ipr_sdev_err(scsi_cmd->device,
3511 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3512 } else {
3513 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3514 SCSI_SENSE_BUFFERSIZE);
3515 }
3516
3517 if (res) {
3518 res->needs_sync_complete = 1;
3519 res->in_erp = 0;
3520 }
3521 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3522 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3523 scsi_cmd->scsi_done(scsi_cmd);
3524}
3525
3526/**
3527 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3528 * @ipr_cmd: ipr command struct
3529 *
3530 * Return value:
3531 * none
3532 **/
3533static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3534{
3535 struct ipr_ioarcb *ioarcb;
3536 struct ipr_ioasa *ioasa;
3537
3538 ioarcb = &ipr_cmd->ioarcb;
3539 ioasa = &ipr_cmd->ioasa;
3540
3541 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3542 ioarcb->write_data_transfer_length = 0;
3543 ioarcb->read_data_transfer_length = 0;
3544 ioarcb->write_ioadl_len = 0;
3545 ioarcb->read_ioadl_len = 0;
3546 ioasa->ioasc = 0;
3547 ioasa->residual_data_len = 0;
3548}
3549
3550/**
3551 * ipr_erp_request_sense - Send request sense to a device
3552 * @ipr_cmd: ipr command struct
3553 *
3554 * This function sends a request sense to a device as a result
3555 * of a check condition.
3556 *
3557 * Return value:
3558 * nothing
3559 **/
3560static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3561{
3562 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3563 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3564
3565 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3566 ipr_erp_done(ipr_cmd);
3567 return;
3568 }
3569
3570 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3571
3572 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3573 cmd_pkt->cdb[0] = REQUEST_SENSE;
3574 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3575 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3576 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3577 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3578
3579 ipr_cmd->ioadl[0].flags_and_data_len =
3580 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3581 ipr_cmd->ioadl[0].address =
3582 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3583
3584 ipr_cmd->ioarcb.read_ioadl_len =
3585 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3586 ipr_cmd->ioarcb.read_data_transfer_length =
3587 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3588
3589 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3590 IPR_REQUEST_SENSE_TIMEOUT * 2);
3591}
3592
3593/**
3594 * ipr_erp_cancel_all - Send cancel all to a device
3595 * @ipr_cmd: ipr command struct
3596 *
3597 * This function sends a cancel all to a device to clear the
3598 * queue. If we are running TCQ on the device, QERR is set to 1,
3599 * which means all outstanding ops have been dropped on the floor.
3600 * Cancel all will return them to us.
3601 *
3602 * Return value:
3603 * nothing
3604 **/
3605static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3606{
3607 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3608 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3609 struct ipr_cmd_pkt *cmd_pkt;
3610
3611 res->in_erp = 1;
3612
3613 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3614
3615 if (!scsi_get_tag_type(scsi_cmd->device)) {
3616 ipr_erp_request_sense(ipr_cmd);
3617 return;
3618 }
3619
3620 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3621 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3622 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3623
3624 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3625 IPR_CANCEL_ALL_TIMEOUT);
3626}
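/*
 * Editor's note -- the ERP steps above and ipr_erp_request_sense()/
 * ipr_erp_done() chain through ipr_do_req() completion callbacks.
 * The resulting flow for a tagged device that returned CHECK CONDITION:
 *
 *   ipr_erp_cancel_all()          -- IPR_CANCEL_ALL_REQUESTS to the device
 *     -> ipr_erp_request_sense()  -- REQUEST SENSE into sense_buffer_dma
 *       -> ipr_erp_done()         -- copy sense data, complete the command
 *
 * Untagged devices skip the cancel step and start at the request sense.
 */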
3627
3628/**
3629 * ipr_dump_ioasa - Dump contents of IOASA
3630 * @ioa_cfg: ioa config struct
3631 * @ipr_cmd: ipr command struct
3632 *
3633 * This function is invoked by the interrupt handler when ops
3634 * fail. It will log the IOASA if appropriate. Only called
3635 * for GPDD ops.
3636 *
3637 * Return value:
3638 * none
3639 **/
3640static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3641 struct ipr_cmnd *ipr_cmd)
3642{
3643 int i;
3644 u16 data_len;
3645 u32 ioasc;
3646 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3647 __be32 *ioasa_data = (__be32 *)ioasa;
3648 int error_index;
3649
3650 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3651
3652 if (0 == ioasc)
3653 return;
3654
3655 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3656 return;
3657
3658 error_index = ipr_get_error(ioasc);
3659
3660 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3661 /* Don't log an error if the IOA already logged one */
3662 if (ioasa->ilid != 0)
3663 return;
3664
3665 if (ipr_error_table[error_index].log_ioasa == 0)
3666 return;
3667 }
3668
3669 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3670 ipr_error_table[error_index].error);
3671
3672	if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3673	    (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3674 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3675 "Device End state: %s Phase: %s\n",
3676 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3677 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3678 }
3679
3680 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3681 data_len = sizeof(struct ipr_ioasa);
3682 else
3683 data_len = be16_to_cpu(ioasa->ret_stat_len);
3684
3685 ipr_err("IOASA Dump:\n");
3686
3687 for (i = 0; i < data_len / 4; i += 4) {
3688 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3689 be32_to_cpu(ioasa_data[i]),
3690 be32_to_cpu(ioasa_data[i+1]),
3691 be32_to_cpu(ioasa_data[i+2]),
3692 be32_to_cpu(ioasa_data[i+3]));
3693 }
3694}
3695
3696/**
3697 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3698 * @ipr_cmd: ipr command struct
3700 *
3701 * Return value:
3702 * none
3703 **/
3704static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3705{
3706 u32 failing_lba;
3707 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3708 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3709 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3710 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3711
3712 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3713
3714 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3715 return;
3716
3717 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3718
3719 if (ipr_is_vset_device(res) &&
3720 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3721 ioasa->u.vset.failing_lba_hi != 0) {
3722 sense_buf[0] = 0x72;
3723 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3724 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3725 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3726
3727 sense_buf[7] = 12;
3728 sense_buf[8] = 0;
3729 sense_buf[9] = 0x0A;
3730 sense_buf[10] = 0x80;
3731
3732 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3733
3734 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3735 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3736 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3737 sense_buf[15] = failing_lba & 0x000000ff;
3738
3739 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3740
3741 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3742 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3743 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3744 sense_buf[19] = failing_lba & 0x000000ff;
3745 } else {
3746 sense_buf[0] = 0x70;
3747 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3748 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3749 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3750
3751 /* Illegal request */
3752 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3753 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3754 sense_buf[7] = 10; /* additional length */
3755
3756 /* IOARCB was in error */
3757 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3758 sense_buf[15] = 0xC0;
3759 else /* Parameter data was invalid */
3760 sense_buf[15] = 0x80;
3761
3762 sense_buf[16] =
3763 ((IPR_FIELD_POINTER_MASK &
3764 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3765 sense_buf[17] =
3766 (IPR_FIELD_POINTER_MASK &
3767 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3768 } else {
3769 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3770 if (ipr_is_vset_device(res))
3771 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3772 else
3773 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3774
3775 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3776 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3777 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3778 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3779 sense_buf[6] = failing_lba & 0x000000ff;
3780 }
3781
3782 sense_buf[7] = 6; /* additional length */
3783 }
3784 }
3785}
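/*
 * Illustrative sketch (not driver code; the helper name is hypothetical):
 * recovering the failing LBA from the fixed-format (0x70) sense data built
 * above. The information field in bytes 3-6 is only meaningful when the
 * Valid bit (bit 7 of byte 0) is set, as done in the DO_NOT_REALLOC path.
 */
#if 0
static u32 example_fixed_sense_failing_lba(const u8 *sense_buf)
{
	if (!(sense_buf[0] & 0x80))	/* Valid bit not set */
		return 0;
	return (sense_buf[3] << 24) | (sense_buf[4] << 16) |
	       (sense_buf[5] << 8) | sense_buf[6];
}
#endif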
3786
3787/**
3788 * ipr_erp_start - Process an error response for a SCSI op
3789 * @ioa_cfg: ioa config struct
3790 * @ipr_cmd: ipr command struct
3791 *
3792 * This function determines whether or not to initiate ERP
3793 * on the affected device.
3794 *
3795 * Return value:
3796 * nothing
3797 **/
3798static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3799 struct ipr_cmnd *ipr_cmd)
3800{
3801 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3802 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3803 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3804
3805 if (!res) {
3806 ipr_scsi_eh_done(ipr_cmd);
3807 return;
3808 }
3809
3810 if (ipr_is_gscsi(res))
3811 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3812 else
3813 ipr_gen_sense(ipr_cmd);
3814
3815 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3816 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3817 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3818 break;
3819 case IPR_IOASC_IR_RESOURCE_HANDLE:
3820 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3821 break;
3822 case IPR_IOASC_HW_SEL_TIMEOUT:
3823 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3824 res->needs_sync_complete = 1;
3825 break;
3826 case IPR_IOASC_SYNC_REQUIRED:
3827 if (!res->in_erp)
3828 res->needs_sync_complete = 1;
3829 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3830 break;
3831 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3832 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3833 break;
3834 case IPR_IOASC_BUS_WAS_RESET:
3835 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3836 /*
3837 * Report the bus reset and ask for a retry. The device
3838 * will give CC/UA the next command.
3839 */
3840 if (!res->resetting_device)
3841 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3842 scsi_cmd->result |= (DID_ERROR << 16);
3843 res->needs_sync_complete = 1;
3844 break;
3845 case IPR_IOASC_HW_DEV_BUS_STATUS:
3846 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
3847 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3848 ipr_erp_cancel_all(ipr_cmd);
3849 return;
3850 }
3851 res->needs_sync_complete = 1;
3852 break;
3853 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3854 break;
3855 default:
3856 scsi_cmd->result |= (DID_ERROR << 16);
3857 if (!ipr_is_vset_device(res))
3858 res->needs_sync_complete = 1;
3859 break;
3860 }
3861
3862 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3863 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3864 scsi_cmd->scsi_done(scsi_cmd);
3865}
3866
3867/**
3868 * ipr_scsi_done - mid-layer done function
3869 * @ipr_cmd: ipr command struct
3870 *
3871 * This function is invoked by the interrupt handler for
3872 * ops generated by the SCSI mid-layer.
3873 *
3874 * Return value:
3875 * none
3876 **/
3877static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3878{
3879 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3880 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3881 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3882
3883 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
3884
3885 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3886 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3887 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3888 scsi_cmd->scsi_done(scsi_cmd);
3889 } else
3890 ipr_erp_start(ioa_cfg, ipr_cmd);
3891}
3892
3893/**
3894 * ipr_save_ioafp_mode_select - Save adapter's mode select data
3895 * @ioa_cfg: ioa config struct
3896 * @scsi_cmd: scsi command struct
3897 *
3898 * This function saves mode select data for the adapter to
3899 * use following an adapter reset.
3900 *
3901 * Return value:
3902 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3903 **/
3904static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3905 struct scsi_cmnd *scsi_cmd)
3906{
3907 if (!ioa_cfg->saved_mode_pages) {
3908 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
3909 GFP_ATOMIC);
3910 if (!ioa_cfg->saved_mode_pages) {
3911 dev_err(&ioa_cfg->pdev->dev,
3912 "IOA mode select buffer allocation failed\n");
3913 return SCSI_MLQUEUE_HOST_BUSY;
3914 }
3915 }
3916
3917 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3918 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3919 return 0;
3920}
3921
3922/**
3923 * ipr_queuecommand - Queue a mid-layer request
3924 * @scsi_cmd: scsi command struct
3925 * @done: done function
3926 *
3927 * This function queues a request generated by the mid-layer.
3928 *
3929 * Return value:
3930 * 0 on success
3931 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3932 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3933 **/
3934static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
3935 void (*done) (struct scsi_cmnd *))
3936{
3937 struct ipr_ioa_cfg *ioa_cfg;
3938 struct ipr_resource_entry *res;
3939 struct ipr_ioarcb *ioarcb;
3940 struct ipr_cmnd *ipr_cmd;
3941 int rc = 0;
3942
3943 scsi_cmd->scsi_done = done;
3944 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3945 res = scsi_cmd->device->hostdata;
3946 scsi_cmd->result = (DID_OK << 16);
3947
3948 /*
3949	 * We are currently blocking all devices due to a host reset.
3950 * We have told the host to stop giving us new requests, but
3951 * ERP ops don't count. FIXME
3952 */
3953 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
3954 return SCSI_MLQUEUE_HOST_BUSY;
3955
3956 /*
3957 * FIXME - Create scsi_set_host_offline interface
3958 * and the ioa_is_dead check can be removed
3959 */
3960 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
3961 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3962 scsi_cmd->result = (DID_NO_CONNECT << 16);
3963 scsi_cmd->scsi_done(scsi_cmd);
3964 return 0;
3965 }
3966
3967 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3968 ioarcb = &ipr_cmd->ioarcb;
3969 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
3970
3971 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3972 ipr_cmd->scsi_cmd = scsi_cmd;
3973 ioarcb->res_handle = res->cfgte.res_handle;
3974 ipr_cmd->done = ipr_scsi_done;
3975 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
3976
3977 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
3978 if (scsi_cmd->underflow == 0)
3979 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3980
3981 if (res->needs_sync_complete) {
3982 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
3983 res->needs_sync_complete = 0;
3984 }
3985
3986 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
3987 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
3988 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
3989 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
3990 }
3991
3992 if (scsi_cmd->cmnd[0] >= 0xC0 &&
3993 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
3994 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
3995
3996 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
3997 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
3998
3999 if (likely(rc == 0))
4000 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4001
4002 if (likely(rc == 0)) {
4003 mb();
4004 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4005 ioa_cfg->regs.ioarrin_reg);
4006 } else {
4007 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4008 return SCSI_MLQUEUE_HOST_BUSY;
4009 }
4010
4011 return 0;
4012}
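/*
 * Editor's note -- the mb() ahead of the IOARRIN write above is the usual
 * descriptor/doorbell ordering pattern: all IOARCB memory writes must be
 * visible before the adapter is told to fetch the block. Generic shape of
 * the pattern (illustrative only; build_ioarcb() is a stand-in name):
 */
#if 0
	build_ioarcb(ipr_cmd);	/* 1. build the descriptor in memory */
	mb();			/* 2. order memory writes before MMIO */
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);	/* 3. ring the doorbell */
#endif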
4013
4014/**
4015 * ipr_ioa_info - Get information about the card/driver
4016 * @host: scsi host struct
4017 *
4018 * Return value:
4019 * pointer to buffer with description string
4020 **/
4021static const char * ipr_ioa_info(struct Scsi_Host *host)
4022{
4023 static char buffer[512];
4024 struct ipr_ioa_cfg *ioa_cfg;
4025 unsigned long lock_flags = 0;
4026
4027 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4028
4029 spin_lock_irqsave(host->host_lock, lock_flags);
4030 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4031 spin_unlock_irqrestore(host->host_lock, lock_flags);
4032
4033 return buffer;
4034}
4035
4036static struct scsi_host_template driver_template = {
4037 .module = THIS_MODULE,
4038 .name = "IPR",
4039 .info = ipr_ioa_info,
4040 .queuecommand = ipr_queuecommand,
4041 .eh_abort_handler = ipr_eh_abort,
4042 .eh_device_reset_handler = ipr_eh_dev_reset,
4043 .eh_host_reset_handler = ipr_eh_host_reset,
4044 .slave_alloc = ipr_slave_alloc,
4045 .slave_configure = ipr_slave_configure,
4046 .slave_destroy = ipr_slave_destroy,
4047 .change_queue_depth = ipr_change_queue_depth,
4048 .change_queue_type = ipr_change_queue_type,
4049 .bios_param = ipr_biosparam,
4050 .can_queue = IPR_MAX_COMMANDS,
4051 .this_id = -1,
4052 .sg_tablesize = IPR_MAX_SGLIST,
4053 .max_sectors = IPR_IOA_MAX_SECTORS,
4054 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4055 .use_clustering = ENABLE_CLUSTERING,
4056 .shost_attrs = ipr_ioa_attrs,
4057 .sdev_attrs = ipr_dev_attrs,
4058 .proc_name = IPR_NAME
4059};
4060
4061#ifdef CONFIG_PPC_PSERIES
4062static const u16 ipr_blocked_processors[] = {
4063 PV_NORTHSTAR,
4064 PV_PULSAR,
4065 PV_POWER4,
4066 PV_ICESTAR,
4067 PV_SSTAR,
4068 PV_POWER4p,
4069 PV_630,
4070 PV_630p
4071};
4072
4073/**
4074 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4075 * @ioa_cfg: ioa cfg struct
4076 *
4077 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4078 * certain pSeries hardware. This function determines if the given
4079 * adapter is in one of these configurations or not.
4080 *
4081 * Return value:
4082 * 1 if adapter is not supported / 0 if adapter is supported
4083 **/
4084static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4085{
4086 u8 rev_id;
4087 int i;
4088
4089 if (ioa_cfg->type == 0x5702) {
4090 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4091 &rev_id) == PCIBIOS_SUCCESSFUL) {
4092 if (rev_id < 4) {
4093 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4094 if (__is_processor(ipr_blocked_processors[i]))
4095 return 1;
4096 }
4097 }
4098 }
4099 }
4100 return 0;
4101}
4102#else
4103#define ipr_invalid_adapter(ioa_cfg) 0
4104#endif
4105
4106/**
4107 * ipr_ioa_bringdown_done - IOA bring down completion.
4108 * @ipr_cmd: ipr command struct
4109 *
4110 * This function processes the completion of an adapter bring down.
4111 * It wakes any reset sleepers.
4112 *
4113 * Return value:
4114 * IPR_RC_JOB_RETURN
4115 **/
4116static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4117{
4118 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4119
4120 ENTER;
4121 ioa_cfg->in_reset_reload = 0;
4122 ioa_cfg->reset_retries = 0;
4123 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4124 wake_up_all(&ioa_cfg->reset_wait_q);
4125
4126 spin_unlock_irq(ioa_cfg->host->host_lock);
4127 scsi_unblock_requests(ioa_cfg->host);
4128 spin_lock_irq(ioa_cfg->host->host_lock);
4129 LEAVE;
4130
4131 return IPR_RC_JOB_RETURN;
4132}
4133
4134/**
4135 * ipr_ioa_reset_done - IOA reset completion.
4136 * @ipr_cmd: ipr command struct
4137 *
4138 * This function processes the completion of an adapter reset.
4139 * It schedules any necessary mid-layer add/removes and
4140 * wakes any reset sleepers.
4141 *
4142 * Return value:
4143 * IPR_RC_JOB_RETURN
4144 **/
4145static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4146{
4147 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4148 struct ipr_resource_entry *res;
4149 struct ipr_hostrcb *hostrcb, *temp;
4150 int i = 0;
4151
4152 ENTER;
4153 ioa_cfg->in_reset_reload = 0;
4154 ioa_cfg->allow_cmds = 1;
4155 ioa_cfg->reset_cmd = NULL;
4156
4157 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4158 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4159 ipr_trace;
4160 break;
4161 }
4162 }
4163 schedule_work(&ioa_cfg->work_q);
4164
4165 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4166 list_del(&hostrcb->queue);
4167 if (i++ < IPR_NUM_LOG_HCAMS)
4168 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4169 else
4170 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4171 }
4172
4173 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4174
4175 ioa_cfg->reset_retries = 0;
4176 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4177 wake_up_all(&ioa_cfg->reset_wait_q);
4178
4179 spin_unlock_irq(ioa_cfg->host->host_lock);
4180 scsi_unblock_requests(ioa_cfg->host);
4181 spin_lock_irq(ioa_cfg->host->host_lock);
4182
4183 if (!ioa_cfg->allow_cmds)
4184 scsi_block_requests(ioa_cfg->host);
4185
4186 LEAVE;
4187 return IPR_RC_JOB_RETURN;
4188}
4189
4190/**
4191 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4192 * @supported_dev: supported device struct
4193 * @vpids: vendor product id struct
4194 *
4195 * Return value:
4196 * none
4197 **/
4198static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4199 struct ipr_std_inq_vpids *vpids)
4200{
4201 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4202 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4203 supported_dev->num_records = 1;
4204 supported_dev->data_length =
4205 cpu_to_be16(sizeof(struct ipr_supported_device));
4206 supported_dev->reserved = 0;
4207}
4208
4209/**
4210 * ipr_set_supported_devs - Send Set Supported Devices for a device
4211 * @ipr_cmd: ipr command struct
4212 *
4213 * This function sends a Set Supported Devices to the adapter
4214 *
4215 * Return value:
4216 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4217 **/
4218static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4219{
4220 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4221 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4222 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4223 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4224 struct ipr_resource_entry *res = ipr_cmd->u.res;
4225
4226 ipr_cmd->job_step = ipr_ioa_reset_done;
4227
4228 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4229		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4230			continue;
4231
4232 ipr_cmd->u.res = res;
4233 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4234
4235 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4236 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4237 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4238
4239 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4240 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4241 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4242
4243 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4244 sizeof(struct ipr_supported_device));
4245 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4246 offsetof(struct ipr_misc_cbs, supp_dev));
4247 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4248 ioarcb->write_data_transfer_length =
4249 cpu_to_be32(sizeof(struct ipr_supported_device));
4250
4251 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4252 IPR_SET_SUP_DEVICE_TIMEOUT);
4253
4254 ipr_cmd->job_step = ipr_set_supported_devs;
4255 return IPR_RC_JOB_RETURN;
4256 }
4257
4258 return IPR_RC_JOB_CONTINUE;
4259}
4260
4261/**
4262 * ipr_setup_write_cache - Disable write cache if needed
4263 * @ipr_cmd: ipr command struct
4264 *
4265 * This function sets up the adapter's write cache to the desired setting
4266 *
4267 * Return value:
4268 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4269 **/
4270static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4271{
4272 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4273
4274 ipr_cmd->job_step = ipr_set_supported_devs;
4275 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4276 struct ipr_resource_entry, queue);
4277
4278 if (ioa_cfg->cache_state != CACHE_DISABLED)
4279 return IPR_RC_JOB_CONTINUE;
4280
4281 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4282 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4283 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4284 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4285
4286 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4287
4288 return IPR_RC_JOB_RETURN;
4289}
4290
4291/**
4292 * ipr_get_mode_page - Locate specified mode page
4293 * @mode_pages: mode page buffer
4294 * @page_code: page code to find
4295 * @len: minimum required length for mode page
4296 *
4297 * Return value:
4298 * pointer to mode page / NULL on failure
4299 **/
4300static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4301 u32 page_code, u32 len)
4302{
4303 struct ipr_mode_page_hdr *mode_hdr;
4304 u32 page_length;
4305 u32 length;
4306
4307 if (!mode_pages || (mode_pages->hdr.length == 0))
4308 return NULL;
4309
4310 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4311 mode_hdr = (struct ipr_mode_page_hdr *)
4312 (mode_pages->data + mode_pages->hdr.block_desc_len);
4313
4314 while (length) {
4315 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4316 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4317 return mode_hdr;
4318 break;
4319 } else {
4320 page_length = (sizeof(struct ipr_mode_page_hdr) +
4321 mode_hdr->page_length);
4322 length -= page_length;
4323 mode_hdr = (struct ipr_mode_page_hdr *)
4324 ((unsigned long)mode_hdr + page_length);
4325 }
4326 }
4327 return NULL;
4328}
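/*
 * Usage sketch (illustrative): ipr_get_mode_page() returns NULL when the
 * page is absent or shorter than requested, so a defensive caller checks
 * the result before dereferencing -- note that the page 28 callers below
 * assume success.
 */
#if 0
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));
	if (!mode_page)
		return;	/* page 0x28 not present in the buffer */
#endif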
4329
4330/**
4331 * ipr_check_term_power - Check for term power errors
4332 * @ioa_cfg: ioa config struct
4333 * @mode_pages: IOAFP mode pages buffer
4334 *
4335 * Check the IOAFP's mode page 28 for term power errors
4336 *
4337 * Return value:
4338 * nothing
4339 **/
4340static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4341 struct ipr_mode_pages *mode_pages)
4342{
4343 int i;
4344 int entry_length;
4345 struct ipr_dev_bus_entry *bus;
4346 struct ipr_mode_page28 *mode_page;
4347
4348 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4349 sizeof(struct ipr_mode_page28));
4350
4351 entry_length = mode_page->entry_length;
4352
4353 bus = mode_page->bus;
4354
4355 for (i = 0; i < mode_page->num_entries; i++) {
4356 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4357 dev_err(&ioa_cfg->pdev->dev,
4358 "Term power is absent on scsi bus %d\n",
4359 bus->res_addr.bus);
4360 }
4361
4362 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4363 }
4364}
4365
4366/**
4367 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4368 * @ioa_cfg: ioa config struct
4369 *
4370 * Looks through the config table checking for SES devices. If
4371 * the SES device is in the SES table indicating a maximum SCSI
4372 * bus speed, the speed is limited for the bus.
4373 *
4374 * Return value:
4375 * none
4376 **/
4377static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4378{
4379 u32 max_xfer_rate;
4380 int i;
4381
4382 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4383 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4384 ioa_cfg->bus_attr[i].bus_width);
4385
4386 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4387 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4388 }
4389}
4390
4391/**
4392 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4393 * @ioa_cfg: ioa config struct
4394 * @mode_pages: mode page 28 buffer
4395 *
4396 * Updates mode page 28 based on driver configuration
4397 *
4398 * Return value:
4399 * none
4400 **/
4401static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4402 struct ipr_mode_pages *mode_pages)
4403{
4404 int i, entry_length;
4405 struct ipr_dev_bus_entry *bus;
4406 struct ipr_bus_attributes *bus_attr;
4407 struct ipr_mode_page28 *mode_page;
4408
4409 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4410 sizeof(struct ipr_mode_page28));
4411
4412 entry_length = mode_page->entry_length;
4413
4414 /* Loop for each device bus entry */
4415 for (i = 0, bus = mode_page->bus;
4416 i < mode_page->num_entries;
4417 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4418 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4419 dev_err(&ioa_cfg->pdev->dev,
4420 "Invalid resource address reported: 0x%08X\n",
4421 IPR_GET_PHYS_LOC(bus->res_addr));
4422 continue;
4423 }
4424
4425 bus_attr = &ioa_cfg->bus_attr[i];
4426 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4427 bus->bus_width = bus_attr->bus_width;
4428 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4429 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4430 if (bus_attr->qas_enabled)
4431 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4432 else
4433 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4434 }
4435}
4436
4437/**
4438 * ipr_build_mode_select - Build a mode select command
4439 * @ipr_cmd: ipr command struct
4440 * @res_handle: resource handle to send command to
4441 * @parm: byte 1 of the MODE SELECT CDB (PF/SP bits)
4442 * @dma_addr: DMA buffer address
4443 * @xfer_len: data transfer length
4444 *
4445 * Return value:
4446 * none
4447 **/
4448static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4449 __be32 res_handle, u8 parm, u32 dma_addr,
4450 u8 xfer_len)
4451{
4452 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4453 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4454
4455 ioarcb->res_handle = res_handle;
4456 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4457 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4458 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4459 ioarcb->cmd_pkt.cdb[1] = parm;
4460 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4461
4462 ioadl->flags_and_data_len =
4463 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4464 ioadl->address = cpu_to_be32(dma_addr);
4465 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4466 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4467}
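/*
 * Editor's note -- layout of the 6-byte MODE SELECT CDB filled in above:
 *   cdb[0]  opcode (MODE_SELECT, 0x15)
 *   cdb[1]  "parm": PF/SP bits (the page 28 caller below passes 0x11,
 *           i.e. PF=1 page-format data, SP=1 save pages)
 *   cdb[4]  parameter list length
 */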
4468
4469/**
4470 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4471 * @ipr_cmd: ipr command struct
4472 *
4473 * This function sets up the SCSI bus attributes and sends
4474 * a Mode Select for Page 28 to activate them.
4475 *
4476 * Return value:
4477 * IPR_RC_JOB_RETURN
4478 **/
4479static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4480{
4481 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4482 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4483 int length;
4484
4485 ENTER;
4486 if (ioa_cfg->saved_mode_pages) {
4487 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4488 ioa_cfg->saved_mode_page_len);
4489 length = ioa_cfg->saved_mode_page_len;
4490 } else {
4491 ipr_scsi_bus_speed_limit(ioa_cfg);
4492 ipr_check_term_power(ioa_cfg, mode_pages);
4493 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4494 length = mode_pages->hdr.length + 1;
4495 mode_pages->hdr.length = 0;
4496 }
4497
4498 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4499 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4500 length);
4501
4502	ipr_cmd->job_step = ipr_setup_write_cache;
4503	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4504
4505 LEAVE;
4506 return IPR_RC_JOB_RETURN;
4507}
4508
4509/**
4510 * ipr_build_mode_sense - Builds a mode sense command
4511 * @ipr_cmd: ipr command struct
4512 * @res_handle: resource handle to send command to
4513 * @parm: Byte 2 of mode sense command
4514 * @dma_addr: DMA address of mode sense buffer
4515 * @xfer_len: Size of DMA buffer
4516 *
4517 * Return value:
4518 * none
4519 **/
4520static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4521 __be32 res_handle,
4522 u8 parm, u32 dma_addr, u8 xfer_len)
4523{
4524 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4525 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4526
4527 ioarcb->res_handle = res_handle;
4528 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4529 ioarcb->cmd_pkt.cdb[2] = parm;
4530 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4531 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4532
4533 ioadl->flags_and_data_len =
4534 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4535 ioadl->address = cpu_to_be32(dma_addr);
4536 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4537 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4538}
4539
4540/**
4541 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4542 * @ipr_cmd: ipr command struct
4543 *
4544 * This function sends a Page 28 mode sense to the IOA to
4545 * retrieve SCSI bus attributes.
4546 *
4547 * Return value:
4548 * IPR_RC_JOB_RETURN
4549 **/
4550static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4551{
4552 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4553
4554 ENTER;
4555 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4556 0x28, ioa_cfg->vpd_cbs_dma +
4557 offsetof(struct ipr_misc_cbs, mode_pages),
4558 sizeof(struct ipr_mode_pages));
4559
4560 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4561
4562 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4563
4564 LEAVE;
4565 return IPR_RC_JOB_RETURN;
4566}
4567
4568/**
4569 * ipr_init_res_table - Initialize the resource table
4570 * @ipr_cmd: ipr command struct
4571 *
4572 * This function looks through the existing resource table, comparing
4573 * it with the config table. This function will take care of old/new
4574 * devices and schedule adding/removing them from the mid-layer
4575 * as appropriate.
4576 *
4577 * Return value:
4578 * IPR_RC_JOB_CONTINUE
4579 **/
4580static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4581{
4582 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4583 struct ipr_resource_entry *res, *temp;
4584 struct ipr_config_table_entry *cfgte;
4585 int found, i;
4586 LIST_HEAD(old_res);
4587
4588 ENTER;
4589 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4590 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4591
4592 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4593 list_move_tail(&res->queue, &old_res);
4594
4595 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4596 cfgte = &ioa_cfg->cfg_table->dev[i];
4597 found = 0;
4598
4599 list_for_each_entry_safe(res, temp, &old_res, queue) {
4600 if (!memcmp(&res->cfgte.res_addr,
4601 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4602 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4603 found = 1;
4604 break;
4605 }
4606 }
4607
4608 if (!found) {
4609 if (list_empty(&ioa_cfg->free_res_q)) {
4610 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4611 break;
4612 }
4613
4614 found = 1;
4615 res = list_entry(ioa_cfg->free_res_q.next,
4616 struct ipr_resource_entry, queue);
4617 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4618 ipr_init_res_entry(res);
4619 res->add_to_ml = 1;
4620 }
4621
4622 if (found)
4623 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4624 }
4625
4626 list_for_each_entry_safe(res, temp, &old_res, queue) {
4627 if (res->sdev) {
4628 res->del_from_ml = 1;
4629 res->sdev->hostdata = NULL;
4630 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4631 } else {
4632 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4633 }
4634 }
4635
4636 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4637
4638 LEAVE;
4639 return IPR_RC_JOB_CONTINUE;
4640}
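/*
 * Editor's note -- net effect of the reconciliation above: entries found
 * in both the old resource list and the new config table are kept;
 * config table entries with no match are pulled from the free list and
 * flagged add_to_ml; leftover old entries that still have an sdev are
 * flagged del_from_ml, and the rest go back to the free list.
 */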
4641
4642/**
4643 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4644 * @ipr_cmd: ipr command struct
4645 *
4646 * This function sends a Query IOA Configuration command
4647 * to the adapter to retrieve the IOA configuration table.
4648 *
4649 * Return value:
4650 * IPR_RC_JOB_RETURN
4651 **/
4652static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4653{
4654 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4655 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4656 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4657 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4658
4659 ENTER;
4660 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4661 ucode_vpd->major_release, ucode_vpd->card_type,
4662 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4663 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4664 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4665
4666 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4667 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4668 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4669
4670 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4671 ioarcb->read_data_transfer_length =
4672 cpu_to_be32(sizeof(struct ipr_config_table));
4673
4674 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4675 ioadl->flags_and_data_len =
4676 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4677
4678 ipr_cmd->job_step = ipr_init_res_table;
4679
4680 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4681
4682 LEAVE;
4683 return IPR_RC_JOB_RETURN;
4684}
4685
4686/**
4687 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4688 * @ipr_cmd: ipr command struct
 * @flags: CDB byte 1 (EVPD flag)
 * @page: page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
4689 *
4690 * This utility function sends an inquiry to the adapter.
4691 *
4692 * Return value:
4693 * none
4694 **/
4695static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4696 u32 dma_addr, u8 xfer_len)
4697{
4698 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4699 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4700
4701 ENTER;
4702 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4703 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4704
4705 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4706 ioarcb->cmd_pkt.cdb[1] = flags;
4707 ioarcb->cmd_pkt.cdb[2] = page;
4708 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4709
4710 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4711 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4712
4713 ioadl->address = cpu_to_be32(dma_addr);
4714 ioadl->flags_and_data_len =
4715 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4716
4717 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4718 LEAVE;
4719}
4720
4721/**
4722 * ipr_inquiry_page_supported - Is the given inquiry page supported
4723 * @page0: inquiry page 0 buffer
4724 * @page: page code.
4725 *
4726 * This function determines if the specified inquiry page is supported.
4727 *
4728 * Return value:
4729 * 1 if page is supported / 0 if not
4730 **/
4731static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
4732{
4733 int i;
4734
4735 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
4736 if (page0->page[i] == page)
4737 return 1;
4738
4739 return 0;
4740}
4741
4742/**
4743 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4744 * @ipr_cmd: ipr command struct
4745 *
4746 * This function sends a Page 3 inquiry to the adapter
4747 * to retrieve software VPD information.
4748 *
4749 * Return value:
4750 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4751 **/
4752static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4753{
4754 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4755	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
4756
4757 ENTER;
4758
4759 if (!ipr_inquiry_page_supported(page0, 1))
4760 ioa_cfg->cache_state = CACHE_NONE;
4761
4762 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4763
4764 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4765 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4766 sizeof(struct ipr_inquiry_page3));
4767
4768 LEAVE;
4769 return IPR_RC_JOB_RETURN;
4770}
4771
4772/**
4773 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
4774 * @ipr_cmd: ipr command struct
4775 *
4776 * This function sends a Page 0 inquiry to the adapter
4777 * to retrieve supported inquiry pages.
4778 *
4779 * Return value:
4780 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4781 **/
4782static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
4783{
4784 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4785	char type[5];
4786
4787 ENTER;
4788
4789 /* Grab the type out of the VPD and store it away */
4790 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4791 type[4] = '\0';
4792 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4793
4794	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
4795
4796	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
4797 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
4798 sizeof(struct ipr_inquiry_page0));
4799
4800 LEAVE;
4801 return IPR_RC_JOB_RETURN;
4802}
4803
4804/**
4805 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4806 * @ipr_cmd: ipr command struct
4807 *
4808 * This function sends a standard inquiry to the adapter.
4809 *
4810 * Return value:
4811 * IPR_RC_JOB_RETURN
4812 **/
4813static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4814{
4815 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4816
4817 ENTER;
4818	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
4819
4820 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4821 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4822 sizeof(struct ipr_ioa_vpd));
4823
4824 LEAVE;
4825 return IPR_RC_JOB_RETURN;
4826}
4827
4828/**
4829 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
4830 * @ipr_cmd: ipr command struct
4831 *
4832 * This function sends an Identify Host Request Response Queue
4833 * command to establish the HRRQ with the adapter.
4834 *
4835 * Return value:
4836 * IPR_RC_JOB_RETURN
4837 **/
4838static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
4839{
4840 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4841 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4842
4843 ENTER;
4844 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4845
4846 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4847 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4848
4849 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4850 ioarcb->cmd_pkt.cdb[2] =
4851 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4852 ioarcb->cmd_pkt.cdb[3] =
4853 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4854 ioarcb->cmd_pkt.cdb[4] =
4855 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4856 ioarcb->cmd_pkt.cdb[5] =
4857 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
4858 ioarcb->cmd_pkt.cdb[7] =
4859 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4860 ioarcb->cmd_pkt.cdb[8] =
4861 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4862
4863 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4864
4865 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4866
4867 LEAVE;
4868 return IPR_RC_JOB_RETURN;
4869}
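/*
 * Illustrative equivalent (hypothetical helper, not driver code): the
 * cdb[2..5] assignments above are open-coded big-endian packing of the
 * 32-bit host RRQ DMA address, and cdb[7..8] the 16-bit queue size.
 */
#if 0
static void example_put_be32(u8 *p, u32 val)
{
	p[0] = (val >> 24) & 0xff;
	p[1] = (val >> 16) & 0xff;
	p[2] = (val >> 8) & 0xff;
	p[3] = val & 0xff;
}

	/* equivalent to the cdb[2..5] stores above: */
	example_put_be32(&ioarcb->cmd_pkt.cdb[2], (u32) ioa_cfg->host_rrq_dma);
#endif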
4870
4871/**
4872 * ipr_reset_timer_done - Adapter reset timer function
4873 * @ipr_cmd: ipr command struct
4874 *
4875 * Description: This function is used in adapter reset processing
4876 * for timing events. If the reset_cmd pointer in the IOA
4877 * config struct no longer points at this command, we are doing nested
4878 * resets and fail_all_ops will take care of freeing the
4879 * command block.
4880 *
4881 * Return value:
4882 * none
4883 **/
4884static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4885{
4886 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4887 unsigned long lock_flags = 0;
4888
4889 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4890
4891 if (ioa_cfg->reset_cmd == ipr_cmd) {
4892 list_del(&ipr_cmd->queue);
4893 ipr_cmd->done(ipr_cmd);
4894 }
4895
4896 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4897}
4898
4899/**
4900 * ipr_reset_start_timer - Start a timer for adapter reset job
4901 * @ipr_cmd: ipr command struct
4902 * @timeout: timeout value
4903 *
4904 * Description: This function is used in adapter reset processing
4905 * for timing events. If the reset_cmd pointer in the IOA
4906 * config struct no longer points at this command, we are doing nested
4907 * resets and fail_all_ops will take care of freeing the
4908 * command block.
4909 *
4910 * Return value:
4911 * none
4912 **/
4913static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4914 unsigned long timeout)
4915{
4916 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4917 ipr_cmd->done = ipr_reset_ioa_job;
4918
4919 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4920 ipr_cmd->timer.expires = jiffies + timeout;
4921 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4922 add_timer(&ipr_cmd->timer);
4923}
4924
4925/**
4926 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4927 * @ioa_cfg: ioa cfg struct
4928 *
4929 * Return value:
4930 * nothing
4931 **/
4932static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4933{
4934 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4935
4936 /* Initialize Host RRQ pointers */
4937 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4938 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4939 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4940 ioa_cfg->toggle_bit = 1;
4941
4942 /* Zero out config table */
4943 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
4944}
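/*
 * Editor's note -- toggle_bit implements ownership in the circular host
 * RRQ: the adapter inverts the toggle value it writes on each wrap of the
 * queue, so a response entry whose toggle matches ioa_cfg->toggle_bit is
 * new, and the host flips its copy each time hrrq_curr wraps.
 */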
4945
4946/**
4947 * ipr_reset_enable_ioa - Enable the IOA following a reset.
4948 * @ipr_cmd: ipr command struct
4949 *
4950 * This function reinitializes some control blocks and
4951 * enables destructive diagnostics on the adapter.
4952 *
4953 * Return value:
4954 * IPR_RC_JOB_RETURN
4955 **/
4956static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
4957{
4958 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4959 volatile u32 int_reg;
4960
4961 ENTER;
4962	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
4963 ipr_init_ioa_mem(ioa_cfg);
4964
4965 ioa_cfg->allow_interrupts = 1;
4966 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4967
4968 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4969 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
4970 ioa_cfg->regs.clr_interrupt_mask_reg);
4971 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4972 return IPR_RC_JOB_CONTINUE;
4973 }
4974
4975 /* Enable destructive diagnostics on IOA */
4976 writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
4977
4978 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
4979 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4980
4981 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
4982
4983 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4984 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
4985 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
4986 ipr_cmd->done = ipr_reset_ioa_job;
4987 add_timer(&ipr_cmd->timer);
4988 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4989
4990 LEAVE;
4991 return IPR_RC_JOB_RETURN;
4992}
4993
4994/**
4995 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4996 * @ipr_cmd: ipr command struct
4997 *
4998 * This function is invoked when an adapter dump has run out
4999 * of processing time.
5000 *
5001 * Return value:
5002 * IPR_RC_JOB_CONTINUE
5003 **/
5004static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5005{
5006 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5007
5008 if (ioa_cfg->sdt_state == GET_DUMP)
5009 ioa_cfg->sdt_state = ABORT_DUMP;
5010
5011 ipr_cmd->job_step = ipr_reset_alert;
5012
5013 return IPR_RC_JOB_CONTINUE;
5014}
5015
5016/**
5017 * ipr_unit_check_no_data - Log a unit check/no data error log
5018 * @ioa_cfg: ioa config struct
5019 *
5020 * Logs an error indicating the adapter unit checked, but for some
5021 * reason, we were unable to fetch the unit check buffer.
5022 *
5023 * Return value:
5024 * nothing
5025 **/
5026static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5027{
5028 ioa_cfg->errors_logged++;
5029 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5030}
5031
5032/**
5033 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5034 * @ioa_cfg: ioa config struct
5035 *
5036 * Fetches the unit check buffer from the adapter by clocking the data
5037 * through the mailbox register.
5038 *
5039 * Return value:
5040 * nothing
5041 **/
5042static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5043{
5044 unsigned long mailbox;
5045 struct ipr_hostrcb *hostrcb;
5046 struct ipr_uc_sdt sdt;
5047 int rc, length;
5048
5049 mailbox = readl(ioa_cfg->ioa_mailbox);
5050
5051 if (!ipr_sdt_is_fmt2(mailbox)) {
5052 ipr_unit_check_no_data(ioa_cfg);
5053 return;
5054 }
5055
5056 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5057 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5058 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5059
5060 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5061 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5062 ipr_unit_check_no_data(ioa_cfg);
5063 return;
5064 }
5065
5066 /* Find length of the first sdt entry (UC buffer) */
5067 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5068 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5069
5070 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5071 struct ipr_hostrcb, queue);
5072 list_del(&hostrcb->queue);
5073 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5074
5075 rc = ipr_get_ldump_data_section(ioa_cfg,
5076 be32_to_cpu(sdt.entry[0].bar_str_offset),
5077 (__be32 *)&hostrcb->hcam,
5078 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5079
5080 if (!rc)
5081 ipr_handle_log_data(ioa_cfg, hostrcb);
5082 else
5083 ipr_unit_check_no_data(ioa_cfg);
5084
5085 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5086}
5087
5088/**
5089 * ipr_reset_restore_cfg_space - Restore PCI config space.
5090 * @ipr_cmd: ipr command struct
5091 *
5092 * Description: This function restores the saved PCI config space of
5093 * the adapter, fails all outstanding ops back to the callers, and
5094 * fetches the dump/unit check if applicable to this reset.
5095 *
5096 * Return value:
5097 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5098 **/
5099static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5100{
5101 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5102 int rc;
5103
5104 ENTER;
5105	pci_unblock_user_cfg_access(ioa_cfg->pdev);
5106	rc = pci_restore_state(ioa_cfg->pdev);
5107
5108 if (rc != PCIBIOS_SUCCESSFUL) {
5109 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5110 return IPR_RC_JOB_CONTINUE;
5111 }
5112
5113 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5114 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5115 return IPR_RC_JOB_CONTINUE;
5116 }
5117
5118 ipr_fail_all_ops(ioa_cfg);
5119
5120 if (ioa_cfg->ioa_unit_checked) {
5121 ioa_cfg->ioa_unit_checked = 0;
5122 ipr_get_unit_check_buffer(ioa_cfg);
5123 ipr_cmd->job_step = ipr_reset_alert;
5124 ipr_reset_start_timer(ipr_cmd, 0);
5125 return IPR_RC_JOB_RETURN;
5126 }
5127
5128 if (ioa_cfg->in_ioa_bringdown) {
5129 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5130 } else {
5131 ipr_cmd->job_step = ipr_reset_enable_ioa;
5132
5133 if (GET_DUMP == ioa_cfg->sdt_state) {
5134 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5135 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5136 schedule_work(&ioa_cfg->work_q);
5137 return IPR_RC_JOB_RETURN;
5138 }
5139 }
5140
5141	LEAVE;
5142 return IPR_RC_JOB_CONTINUE;
5143}
5144
5145/**
5146 * ipr_reset_start_bist - Run BIST on the adapter.
5147 * @ipr_cmd: ipr command struct
5148 *
5149 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5150 *
5151 * Return value:
5152 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5153 **/
5154static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5155{
5156 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5157 int rc;
5158
5159 ENTER;
5160	pci_block_user_cfg_access(ioa_cfg->pdev);
5161	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5162
5163 if (rc != PCIBIOS_SUCCESSFUL) {
5164 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5165 rc = IPR_RC_JOB_CONTINUE;
5166 } else {
5167 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5168 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5169 rc = IPR_RC_JOB_RETURN;
5170 }
5171
5172 LEAVE;
5173 return rc;
5174}
5175
5176/**
5177 * ipr_reset_allowed - Query whether or not IOA can be reset
5178 * @ioa_cfg: ioa config struct
5179 *
5180 * Return value:
5181 * 0 if reset not allowed / non-zero if reset is allowed
5182 **/
5183static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5184{
5185 volatile u32 temp_reg;
5186
5187 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5188 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5189}
5190
5191/**
5192 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5193 * @ipr_cmd: ipr command struct
5194 *
5195 * Description: This function waits for adapter permission to run BIST,
5196 * then runs BIST. If the adapter does not give permission after a
5197 * reasonable time, we will reset the adapter anyway. The impact of
5198 * resetting the adapter without warning the adapter is the risk of
5199 * losing the persistent error log on the adapter. If the adapter is
5200 * reset while it is writing to the flash on the adapter, the flash
5201 * segment will have bad ECC and be zeroed.
5202 *
5203 * Return value:
5204 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5205 **/
5206static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5207{
5208 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5209 int rc = IPR_RC_JOB_RETURN;
5210
5211 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5212 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5213 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5214 } else {
5215 ipr_cmd->job_step = ipr_reset_start_bist;
5216 rc = IPR_RC_JOB_CONTINUE;
5217 }
5218
5219 return rc;
5220}
5221
5222/**
5223 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5224 * @ipr_cmd: ipr command struct
5225 *
5226 * Description: This function alerts the adapter that it will be reset.
5227 * If memory space is not currently enabled, proceed directly
5228 * to running BIST on the adapter. The timer must always be started
5229 * so we guarantee we do not run BIST from ipr_isr.
5230 *
5231 * Return value:
5232 * IPR_RC_JOB_RETURN
5233 **/
5234static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5235{
5236 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5237 u16 cmd_reg;
5238 int rc;
5239
5240 ENTER;
5241 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5242
5243 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5244 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5245 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5246 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5247 } else {
5248 ipr_cmd->job_step = ipr_reset_start_bist;
5249 }
5250
5251 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5252 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5253
5254 LEAVE;
5255 return IPR_RC_JOB_RETURN;
5256}
5257
5258/**
5259 * ipr_reset_ucode_download_done - Microcode download completion
5260 * @ipr_cmd: ipr command struct
5261 *
5262 * Description: This function unmaps the microcode download buffer.
5263 *
5264 * Return value:
5265 * IPR_RC_JOB_CONTINUE
5266 **/
5267static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5268{
5269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5270 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5271
5272 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5273 sglist->num_sg, DMA_TO_DEVICE);
5274
5275 ipr_cmd->job_step = ipr_reset_alert;
5276 return IPR_RC_JOB_CONTINUE;
5277}
5278
5279/**
5280 * ipr_reset_ucode_download - Download microcode to the adapter
5281 * @ipr_cmd: ipr command struct
5282 *
5283 * Description: This function checks to see if there is microcode
5284 * to download to the adapter. If there is, a download is performed.
5285 *
5286 * Return value:
5287 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5288 **/
5289static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5290{
5291 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5292 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5293
5294 ENTER;
5295 ipr_cmd->job_step = ipr_reset_alert;
5296
5297 if (!sglist)
5298 return IPR_RC_JOB_CONTINUE;
5299
5300 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5301 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5302 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5303 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5304 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5305 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5306 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5307
5308	ipr_build_ucode_ioadl(ipr_cmd, sglist);
5309	ipr_cmd->job_step = ipr_reset_ucode_download_done;
5310
5311 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5312 IPR_WRITE_BUFFER_TIMEOUT);
5313
5314 LEAVE;
5315 return IPR_RC_JOB_RETURN;
5316}
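
/*
 * Editor's note -- worked example, not part of the driver: the WRITE
 * BUFFER CDB built above carries the image length as a 24-bit
 * big-endian value in bytes 6..8, which caps a single download at
 * 16 MB - 1. For a hypothetical 0x012345-byte image:
 *
 *	u32 len = 0x012345;
 *	u8 cdb6 = (len & 0xff0000) >> 16;	(yields 0x01)
 *	u8 cdb7 = (len & 0x00ff00) >> 8;	(yields 0x23)
 *	u8 cdb8 =  len & 0x0000ff;		(yields 0x45)
 */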
5317
5318/**
5319 * ipr_reset_shutdown_ioa - Shutdown the adapter
5320 * @ipr_cmd: ipr command struct
5321 *
5322 * Description: This function issues an adapter shutdown of the
5323 * specified type to the specified adapter as part of the
5324 * adapter reset job.
5325 *
5326 * Return value:
5327 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5328 **/
5329static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5330{
5331 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5332 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5333 unsigned long timeout;
5334 int rc = IPR_RC_JOB_CONTINUE;
5335
5336 ENTER;
5337 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5338 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5339 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5340 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5341 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5342
5343 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5344 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5345 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5346 timeout = IPR_INTERNAL_TIMEOUT;
5347 else
5348 timeout = IPR_SHUTDOWN_TIMEOUT;
5349
5350 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5351
5352 rc = IPR_RC_JOB_RETURN;
5353 ipr_cmd->job_step = ipr_reset_ucode_download;
5354 } else
5355 ipr_cmd->job_step = ipr_reset_alert;
5356
5357 LEAVE;
5358 return rc;
5359}
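
/*
 * Editor's note -- the timeout selection above, summarized (all
 * constants are the driver's own, defined in ipr.h):
 *
 *	IPR_SHUTDOWN_ABBREV             -> IPR_ABBREV_SHUTDOWN_TIMEOUT
 *	IPR_SHUTDOWN_PREPARE_FOR_NORMAL -> IPR_INTERNAL_TIMEOUT
 *	all other shutdown types        -> IPR_SHUTDOWN_TIMEOUT
 */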
5360
5361/**
5362 * ipr_reset_ioa_job - Adapter reset job
5363 * @ipr_cmd: ipr command struct
5364 *
5365 * Description: This function is the job router for the adapter reset job.
5366 *
5367 * Return value:
5368 * none
5369 **/
5370static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5371{
5372 u32 rc, ioasc;
5373 unsigned long scratch = ipr_cmd->u.scratch;
5374 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5375
5376 do {
5377 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5378
5379 if (ioa_cfg->reset_cmd != ipr_cmd) {
5380 /*
5381 * We are doing nested adapter resets and this is
5382 * not the current reset job.
5383 */
5384 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5385 return;
5386 }
5387
5388 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5389 dev_err(&ioa_cfg->pdev->dev,
5390 "0x%02X failed with IOASC: 0x%08X\n",
5391 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5392
5393 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5394 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5395 return;
5396 }
5397
5398 ipr_reinit_ipr_cmnd(ipr_cmd);
5399 ipr_cmd->u.scratch = scratch;
5400 rc = ipr_cmd->job_step(ipr_cmd);
5401	} while (rc == IPR_RC_JOB_CONTINUE);
5402}
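
/*
 * Editor's note -- hypothetical step, shown only to illustrate the
 * contract the router above relies on: a job_step returns
 * IPR_RC_JOB_CONTINUE to have ipr_reset_ioa_job() invoke the next
 * step immediately, or IPR_RC_JOB_RETURN after arming a timer or
 * issuing a command whose completion will re-enter the job. The names
 * ipr_reset_example_step, ipr_reset_next_step, and done are made up:
 *
 *	static int ipr_reset_example_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = ipr_reset_next_step;
 *		if (done)
 *			return IPR_RC_JOB_CONTINUE;
 *		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
 *		return IPR_RC_JOB_RETURN;
 *	}
 */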
5403
5404/**
5405 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5406 * @ioa_cfg: ioa config struct
5407 * @job_step: first job step of reset job
5408 * @shutdown_type: shutdown type
5409 *
5410 * Description: This function will initiate the reset of the given adapter
5411 * starting at the selected job step.
5412 * If the caller needs to wait on the completion of the reset,
5413 * the caller must sleep on the reset_wait_q.
5414 *
5415 * Return value:
5416 * none
5417 **/
5418static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5419 int (*job_step) (struct ipr_cmnd *),
5420 enum ipr_shutdown_type shutdown_type)
5421{
5422 struct ipr_cmnd *ipr_cmd;
5423
5424 ioa_cfg->in_reset_reload = 1;
5425 ioa_cfg->allow_cmds = 0;
5426 scsi_block_requests(ioa_cfg->host);
5427
5428 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5429 ioa_cfg->reset_cmd = ipr_cmd;
5430 ipr_cmd->job_step = job_step;
5431 ipr_cmd->u.shutdown_type = shutdown_type;
5432
5433 ipr_reset_ioa_job(ipr_cmd);
5434}
5435
5436/**
5437 * ipr_initiate_ioa_reset - Initiate an adapter reset
5438 * @ioa_cfg: ioa config struct
5439 * @shutdown_type: shutdown type
5440 *
5441 * Description: This function will initiate the reset of the given adapter.
5442 * If the caller needs to wait on the completion of the reset,
5443 * the caller must sleep on the reset_wait_q.
5444 *
5445 * Return value:
5446 * none
5447 **/
5448static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5449 enum ipr_shutdown_type shutdown_type)
5450{
5451 if (ioa_cfg->ioa_is_dead)
5452 return;
5453
5454 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5455 ioa_cfg->sdt_state = ABORT_DUMP;
5456
5457 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5458 dev_err(&ioa_cfg->pdev->dev,
5459 "IOA taken offline - error recovery failed\n");
5460
5461 ioa_cfg->reset_retries = 0;
5462 ioa_cfg->ioa_is_dead = 1;
5463
5464 if (ioa_cfg->in_ioa_bringdown) {
5465 ioa_cfg->reset_cmd = NULL;
5466 ioa_cfg->in_reset_reload = 0;
5467 ipr_fail_all_ops(ioa_cfg);
5468 wake_up_all(&ioa_cfg->reset_wait_q);
5469
5470 spin_unlock_irq(ioa_cfg->host->host_lock);
5471 scsi_unblock_requests(ioa_cfg->host);
5472 spin_lock_irq(ioa_cfg->host->host_lock);
5473 return;
5474 } else {
5475 ioa_cfg->in_ioa_bringdown = 1;
5476 shutdown_type = IPR_SHUTDOWN_NONE;
5477 }
5478 }
5479
5480 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5481 shutdown_type);
5482}
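
/*
 * Editor's note -- usage sketch mirroring ipr_shutdown() later in this
 * file: a caller that must block until the reset completes holds the
 * host lock around the initiation and then sleeps on reset_wait_q, as
 * the kerneldoc above requires:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */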
5483
5484/**
5485 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa()
5486 * @ioa_cfg: ioa cfg struct
5487 *
5488 * Description: This is the second phase of adapter initialization.
5489 * This function takes care of initializing the adapter to the point
5490 * where it can accept new commands.
5491 *
5492 * Return value:
5493 * 0 on success / -EIO on failure
5494 **/
5495static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5496{
5497 int rc = 0;
5498 unsigned long host_lock_flags = 0;
5499
5500 ENTER;
5501 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5502 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5503 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5504
5505 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5506 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5507 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5508
5509 if (ioa_cfg->ioa_is_dead) {
5510 rc = -EIO;
5511 } else if (ipr_invalid_adapter(ioa_cfg)) {
5512 if (!ipr_testmode)
5513 rc = -EIO;
5514
5515 dev_err(&ioa_cfg->pdev->dev,
5516 "Adapter not supported in this hardware configuration.\n");
5517 }
5518
5519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5520
5521 LEAVE;
5522 return rc;
5523}
5524
5525/**
5526 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5527 * @ioa_cfg: ioa config struct
5528 *
5529 * Return value:
5530 * none
5531 **/
5532static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5533{
5534 int i;
5535
5536 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5537 if (ioa_cfg->ipr_cmnd_list[i])
5538 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5539 ioa_cfg->ipr_cmnd_list[i],
5540 ioa_cfg->ipr_cmnd_list_dma[i]);
5541
5542 ioa_cfg->ipr_cmnd_list[i] = NULL;
5543 }
5544
5545 if (ioa_cfg->ipr_cmd_pool)
5546		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
5547
5548 ioa_cfg->ipr_cmd_pool = NULL;
5549}
5550
5551/**
5552 * ipr_free_mem - Frees memory allocated for an adapter
5553 * @ioa_cfg: ioa cfg struct
5554 *
5555 * Return value:
5556 * nothing
5557 **/
5558static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5559{
5560 int i;
5561
5562 kfree(ioa_cfg->res_entries);
5563 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5564 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5565 ipr_free_cmd_blks(ioa_cfg);
5566 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5567 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5568 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5569 ioa_cfg->cfg_table,
5570 ioa_cfg->cfg_table_dma);
5571
5572 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5573 pci_free_consistent(ioa_cfg->pdev,
5574 sizeof(struct ipr_hostrcb),
5575 ioa_cfg->hostrcb[i],
5576 ioa_cfg->hostrcb_dma[i]);
5577 }
5578
5579 ipr_free_dump(ioa_cfg);
5580 kfree(ioa_cfg->saved_mode_pages);
5581 kfree(ioa_cfg->trace);
5582}
5583
5584/**
5585 * ipr_free_all_resources - Free all allocated resources for an adapter.
5586 * @ioa_cfg: ioa config struct
5587 *
5588 * This function frees all allocated resources for the
5589 * specified adapter.
5590 *
5591 * Return value:
5592 * none
5593 **/
5594static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5595{
5596 struct pci_dev *pdev = ioa_cfg->pdev;
5597
5598 ENTER;
5599 free_irq(pdev->irq, ioa_cfg);
5600 iounmap(ioa_cfg->hdw_dma_regs);
5601 pci_release_regions(pdev);
5602 ipr_free_mem(ioa_cfg);
5603 scsi_host_put(ioa_cfg->host);
5604 pci_disable_device(pdev);
5605 LEAVE;
5606}
5607
5608/**
5609 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5610 * @ioa_cfg: ioa config struct
5611 *
5612 * Return value:
5613 * 0 on success / -ENOMEM on allocation failure
5614 **/
5615static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5616{
5617 struct ipr_cmnd *ipr_cmd;
5618 struct ipr_ioarcb *ioarcb;
5619 dma_addr_t dma_addr;
5620 int i;
5621
5622	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
5623						sizeof(struct ipr_cmnd), 8, 0);
5624
5625 if (!ioa_cfg->ipr_cmd_pool)
5626 return -ENOMEM;
5627
5628 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5629		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5630
5631 if (!ipr_cmd) {
5632 ipr_free_cmd_blks(ioa_cfg);
5633 return -ENOMEM;
5634 }
5635
5636 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5637 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5638 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
5639
5640 ioarcb = &ipr_cmd->ioarcb;
5641 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5642 ioarcb->host_response_handle = cpu_to_be32(i << 2);
5643 ioarcb->write_ioadl_addr =
5644 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5645 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5646 ioarcb->ioasa_host_pci_addr =
5647 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5648 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5649 ipr_cmd->cmd_index = i;
5650 ipr_cmd->ioa_cfg = ioa_cfg;
5651 ipr_cmd->sense_buffer_dma = dma_addr +
5652 offsetof(struct ipr_cmnd, sense_buffer);
5653
5654 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5655 }
5656
5657 return 0;
5658}
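
/*
 * Editor's note -- minimal sketch of the PCI DMA-pool idiom used
 * above (the pool name and block size here are illustrative): every
 * allocation returns a CPU virtual address plus the bus address the
 * adapter will DMA to/from, and a free must hand both back:
 *
 *	struct pci_pool *pool;
 *	dma_addr_t dma;
 *	void *blk;
 *
 *	pool = pci_pool_create("example", pdev, 512, 8, 0);
 *	blk = pci_pool_alloc(pool, SLAB_KERNEL, &dma);
 *	...
 *	pci_pool_free(pool, blk, dma);
 *	pci_pool_destroy(pool);
 */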
5659
5660/**
5661 * ipr_alloc_mem - Allocate memory for an adapter
5662 * @ioa_cfg: ioa config struct
5663 *
5664 * Return value:
5665 * 0 on success / non-zero for error
5666 **/
5667static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
5668{
5669 struct pci_dev *pdev = ioa_cfg->pdev;
5670 int i, rc = -ENOMEM;
5671
5672 ENTER;
5673	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
5674				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5675
5676 if (!ioa_cfg->res_entries)
5677 goto out;
5678
5679	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5680 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
5681
5682 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5683 sizeof(struct ipr_misc_cbs),
5684 &ioa_cfg->vpd_cbs_dma);
5685
5686 if (!ioa_cfg->vpd_cbs)
5687 goto out_free_res_entries;
5688
5689 if (ipr_alloc_cmd_blks(ioa_cfg))
5690 goto out_free_vpd_cbs;
5691
5692 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5693 sizeof(u32) * IPR_NUM_CMD_BLKS,
5694 &ioa_cfg->host_rrq_dma);
5695
5696 if (!ioa_cfg->host_rrq)
5697 goto out_ipr_free_cmd_blocks;
5698
5699 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5700 sizeof(struct ipr_config_table),
5701 &ioa_cfg->cfg_table_dma);
5702
5703 if (!ioa_cfg->cfg_table)
5704 goto out_free_host_rrq;
5705
5706 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5707 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5708 sizeof(struct ipr_hostrcb),
5709 &ioa_cfg->hostrcb_dma[i]);
5710
5711 if (!ioa_cfg->hostrcb[i])
5712 goto out_free_hostrcb_dma;
5713
5714 ioa_cfg->hostrcb[i]->hostrcb_dma =
5715 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5716 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5717 }
5718
5719	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
5720				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5721
5722 if (!ioa_cfg->trace)
5723 goto out_free_hostrcb_dma;
5724
5725	rc = 0;
5726out:
5727 LEAVE;
5728 return rc;
5729
5730out_free_hostrcb_dma:
5731 while (i-- > 0) {
5732 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
5733 ioa_cfg->hostrcb[i],
5734 ioa_cfg->hostrcb_dma[i]);
5735 }
5736 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
5737 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
5738out_free_host_rrq:
5739 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5740 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5741out_ipr_free_cmd_blocks:
5742 ipr_free_cmd_blks(ioa_cfg);
5743out_free_vpd_cbs:
5744 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
5745 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5746out_free_res_entries:
5747 kfree(ioa_cfg->res_entries);
5748 goto out;
5749}
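
/*
 * Editor's note -- the error path above is the standard kernel
 * goto-unwind idiom: each label undoes exactly one earlier step, in
 * reverse order, so a failure anywhere releases only what was
 * actually acquired. In miniature (alloc_a/alloc_b are placeholders):
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto out;
 *	b = alloc_b();
 *	if (!b)
 *		goto out_free_a;
 *	return 0;
 *
 * out_free_a:
 *	free_a(a);
 * out:
 *	return -ENOMEM;
 */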
5750
5751/**
5752 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5753 * @ioa_cfg: ioa config struct
5754 *
5755 * Return value:
5756 * none
5757 **/
5758static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5759{
5760 int i;
5761
5762 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5763 ioa_cfg->bus_attr[i].bus = i;
5764 ioa_cfg->bus_attr[i].qas_enabled = 0;
5765 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
5766 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5767 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5768 else
5769 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5770 }
5771}
5772
5773/**
5774 * ipr_init_ioa_cfg - Initialize IOA config struct
5775 * @ioa_cfg: ioa config struct
5776 * @host: scsi host struct
5777 * @pdev: PCI dev struct
5778 *
5779 * Return value:
5780 * none
5781 **/
5782static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5783 struct Scsi_Host *host, struct pci_dev *pdev)
5784{
5785 const struct ipr_interrupt_offsets *p;
5786 struct ipr_interrupts *t;
5787 void __iomem *base;
5788
5789 ioa_cfg->host = host;
5790 ioa_cfg->pdev = pdev;
5791 ioa_cfg->log_level = ipr_log_level;
5792 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5793 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5794 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5795 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5796 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5797 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5798 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5799 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
5800
5801 INIT_LIST_HEAD(&ioa_cfg->free_q);
5802 INIT_LIST_HEAD(&ioa_cfg->pending_q);
5803 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5804 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5805 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5806 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5807 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5808 init_waitqueue_head(&ioa_cfg->reset_wait_q);
5809 ioa_cfg->sdt_state = INACTIVE;
5810	if (ipr_enable_cache)
5811 ioa_cfg->cache_state = CACHE_ENABLED;
5812 else
5813 ioa_cfg->cache_state = CACHE_DISABLED;
5814
5815 ipr_initialize_bus_attr(ioa_cfg);
5816
5817 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5818 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5819 host->max_channel = IPR_MAX_BUS_TO_SCAN;
5820 host->unique_id = host->host_no;
5821 host->max_cmd_len = IPR_MAX_CDB_LEN;
5822 pci_set_drvdata(pdev, ioa_cfg);
5823
5824 p = &ioa_cfg->chip_cfg->regs;
5825 t = &ioa_cfg->regs;
5826 base = ioa_cfg->hdw_dma_regs;
5827
5828 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
5829 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
5830 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
5831 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
5832 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
5833 t->ioarrin_reg = base + p->ioarrin_reg;
5834 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
5835 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
5836 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
5837}
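
/*
 * Editor's note: the block above converts the per-chip byte offsets in
 * chip_cfg->regs into ready-to-use __iomem pointers once at init time,
 * so hot paths can issue a bare readl()/writel() with no per-access
 * arithmetic, e.g. (taken from ipr_reset_alert() earlier in this file):
 *
 *	writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
 */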
5838
5839/**
5840 * ipr_get_chip_cfg - Find adapter chip configuration
5841 * @dev_id: PCI device id struct
5842 *
5843 * Return value:
5844 * ptr to chip config on success / NULL on failure
5845 **/
5846static const struct ipr_chip_cfg_t * __devinit
5847ipr_get_chip_cfg(const struct pci_device_id *dev_id)
5848{
5849 int i;
5850
5851 if (dev_id->driver_data)
5852 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5853
5854 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
5855 if (ipr_chip[i].vendor == dev_id->vendor &&
5856 ipr_chip[i].device == dev_id->device)
5857 return ipr_chip[i].cfg;
5858 return NULL;
5859}
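
/*
 * Editor's note: dev_id->driver_data is populated from ipr_pci_table
 * near the bottom of this file, so the fast path above normally
 * resolves the chip configuration without scanning ipr_chip[]. A
 * table entry stores the pointer as a kernel_ulong_t, e.g.:
 *
 *	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
 *	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
 *	  0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 */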
5860
5861/**
5862 * ipr_probe_ioa - Allocates memory and does first stage of initialization
5863 * @pdev: PCI device struct
5864 * @dev_id: PCI device id struct
5865 *
5866 * Return value:
5867 * 0 on success / non-zero on failure
5868 **/
5869static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5870 const struct pci_device_id *dev_id)
5871{
5872 struct ipr_ioa_cfg *ioa_cfg;
5873 struct Scsi_Host *host;
5874 unsigned long ipr_regs_pci;
5875 void __iomem *ipr_regs;
5876	int rc = PCIBIOS_SUCCESSFUL;	/* signed so the rc < 0 checks below can see negative errnos */
5877
5878 ENTER;
5879
5880 if ((rc = pci_enable_device(pdev))) {
5881 dev_err(&pdev->dev, "Cannot enable adapter\n");
5882 goto out;
5883 }
5884
5885 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
5886
5887 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5888
5889 if (!host) {
5890 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5891 rc = -ENOMEM;
5892 goto out_disable;
5893 }
5894
5895 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5896 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
5897
5898 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
5899
5900 if (!ioa_cfg->chip_cfg) {
5901 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
5902 dev_id->vendor, dev_id->device);
		rc = -ENODEV;	/* without this the probe would return success for an unknown chipset */
5903		goto out_scsi_host_put;
5904 }
5905
5906 ipr_regs_pci = pci_resource_start(pdev, 0);
5907
5908 rc = pci_request_regions(pdev, IPR_NAME);
5909 if (rc < 0) {
5910 dev_err(&pdev->dev,
5911 "Couldn't register memory range of registers\n");
5912 goto out_scsi_host_put;
5913 }
5914
5915 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
5916
5917 if (!ipr_regs) {
5918 dev_err(&pdev->dev,
5919 "Couldn't map memory range of registers\n");
5920 rc = -ENOMEM;
5921 goto out_release_regions;
5922 }
5923
5924 ioa_cfg->hdw_dma_regs = ipr_regs;
5925 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
5926 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5927
5928 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
5929
5930 pci_set_master(pdev);
5931
5932 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
5933 if (rc < 0) {
5934 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5935 goto cleanup_nomem;
5936 }
5937
5938 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5939 ioa_cfg->chip_cfg->cache_line_size);
5940
5941 if (rc != PCIBIOS_SUCCESSFUL) {
5942 dev_err(&pdev->dev, "Write of cache line size failed\n");
5943 rc = -EIO;
5944 goto cleanup_nomem;
5945 }
5946
5947 /* Save away PCI config space for use following IOA reset */
5948 rc = pci_save_state(pdev);
5949
5950 if (rc != PCIBIOS_SUCCESSFUL) {
5951 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5952 rc = -EIO;
5953 goto cleanup_nomem;
5954 }
5955
5956 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
5957 goto cleanup_nomem;
5958
5959 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
5960 goto cleanup_nomem;
5961
5962 rc = ipr_alloc_mem(ioa_cfg);
5963 if (rc < 0) {
5964 dev_err(&pdev->dev,
5965 "Couldn't allocate enough memory for device driver!\n");
5966 goto cleanup_nomem;
5967 }
5968
5969 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
5970 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
5971
5972 if (rc) {
5973 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
5974 pdev->irq, rc);
5975 goto cleanup_nolog;
5976 }
5977
5978 spin_lock(&ipr_driver_lock);
5979 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
5980 spin_unlock(&ipr_driver_lock);
5981
5982 LEAVE;
5983out:
5984 return rc;
5985
5986cleanup_nolog:
5987 ipr_free_mem(ioa_cfg);
5988cleanup_nomem:
5989 iounmap(ipr_regs);
5990out_release_regions:
5991 pci_release_regions(pdev);
5992out_scsi_host_put:
5993 scsi_host_put(host);
5994out_disable:
5995 pci_disable_device(pdev);
5996 goto out;
5997}
5998
5999/**
6000 * ipr_scan_vsets - Scans for VSET devices
6001 * @ioa_cfg: ioa config struct
6002 *
6003 * Description: Since VSET resources do not follow SAM (we can have sparse
6004 * LUNs with no LUN 0), we have to scan for these ourselves.
6005 *
6006 * Return value:
6007 * none
6008 **/
6009static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6010{
6011 int target, lun;
6012
6013 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6014		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
6015 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6016}
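
/*
 * Editor's note -- illustrative expansion: because a VSET may exist
 * at, say, LUN 5 of a target with no LUN 0 for the midlayer scan to
 * find, the loops above register every (target, lun) pair on the
 * VSET bus explicitly; each probe is simply:
 *
 *	scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
 */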
6017
6018/**
6019 * ipr_initiate_ioa_bringdown - Bring down an adapter
6020 * @ioa_cfg: ioa config struct
6021 * @shutdown_type: shutdown type
6022 *
6023 * Description: This function will initiate bringing down the adapter.
6024 * This consists of issuing an IOA shutdown to the adapter
6025 * to flush the cache, and running BIST.
6026 * If the caller needs to wait on the completion of the reset,
6027 * the caller must sleep on the reset_wait_q.
6028 *
6029 * Return value:
6030 * none
6031 **/
6032static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6033 enum ipr_shutdown_type shutdown_type)
6034{
6035 ENTER;
6036 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6037 ioa_cfg->sdt_state = ABORT_DUMP;
6038 ioa_cfg->reset_retries = 0;
6039 ioa_cfg->in_ioa_bringdown = 1;
6040 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6041 LEAVE;
6042}
6043
6044/**
6045 * __ipr_remove - Remove a single adapter
6046 * @pdev: pci device struct
6047 *
6048 * Adapter hot plug remove entry point.
6049 *
6050 * Return value:
6051 * none
6052 **/
6053static void __ipr_remove(struct pci_dev *pdev)
6054{
6055 unsigned long host_lock_flags = 0;
6056 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6057 ENTER;
6058
6059 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6060 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6061
6062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6063 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6064	flush_scheduled_work();
6065	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6066
6067 spin_lock(&ipr_driver_lock);
6068 list_del(&ioa_cfg->queue);
6069 spin_unlock(&ipr_driver_lock);
6070
6071 if (ioa_cfg->sdt_state == ABORT_DUMP)
6072 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6074
6075 ipr_free_all_resources(ioa_cfg);
6076
6077 LEAVE;
6078}
6079
6080/**
6081 * ipr_remove - IOA hot plug remove entry point
6082 * @pdev: pci device struct
6083 *
6084 * Adapter hot plug remove entry point.
6085 *
6086 * Return value:
6087 * none
6088 **/
6089static void ipr_remove(struct pci_dev *pdev)
6090{
6091 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6092
6093 ENTER;
6094
6095	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6096 &ipr_trace_attr);
6097 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6098 &ipr_dump_attr);
6099 scsi_remove_host(ioa_cfg->host);
6100
6101 __ipr_remove(pdev);
6102
6103 LEAVE;
6104}
6105
6106/**
6107 * ipr_probe - Adapter hot plug add entry point
6108 * @pdev: pci device struct / @dev_id: PCI device id struct
6109 * Return value:
6110 * 0 on success / non-zero on failure
6111 **/
6112static int __devinit ipr_probe(struct pci_dev *pdev,
6113 const struct pci_device_id *dev_id)
6114{
6115 struct ipr_ioa_cfg *ioa_cfg;
6116 int rc;
6117
6118 rc = ipr_probe_ioa(pdev, dev_id);
6119
6120 if (rc)
6121 return rc;
6122
6123 ioa_cfg = pci_get_drvdata(pdev);
6124 rc = ipr_probe_ioa_part2(ioa_cfg);
6125
6126 if (rc) {
6127 __ipr_remove(pdev);
6128 return rc;
6129 }
6130
6131 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6132
6133 if (rc) {
6134 __ipr_remove(pdev);
6135 return rc;
6136 }
6137
6138 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6139 &ipr_trace_attr);
6140
6141 if (rc) {
6142 scsi_remove_host(ioa_cfg->host);
6143 __ipr_remove(pdev);
6144 return rc;
6145 }
6146
6147 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6148 &ipr_dump_attr);
6149
6150 if (rc) {
6151 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6152 &ipr_trace_attr);
6153 scsi_remove_host(ioa_cfg->host);
6154 __ipr_remove(pdev);
6155 return rc;
6156 }
6157
6158 scsi_scan_host(ioa_cfg->host);
6159 ipr_scan_vsets(ioa_cfg);
6160 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6161 ioa_cfg->allow_ml_add_del = 1;
6162	ioa_cfg->host->max_channel = IPR_VSET_BUS;
6163	schedule_work(&ioa_cfg->work_q);
6164 return 0;
6165}
6166
6167/**
6168 * ipr_shutdown - Shutdown handler.
6169 * @pdev: pci device struct
6170 *
6171 * This function is invoked upon system shutdown/reboot. It will issue
6172 * an adapter shutdown to the adapter to flush the write cache.
6173 *
6174 * Return value:
6175 * none
6176 **/
6177static void ipr_shutdown(struct pci_dev *pdev)
6178{
6179	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6180	unsigned long lock_flags = 0;
6181
6182 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6183 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6185 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6186}
6187
6188static struct pci_device_id ipr_pci_table[] __devinitdata = {
6189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6190 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6191 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6192 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6193 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6194 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6195 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6196 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6197 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6198 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6199 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6200 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6201 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6202 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6203 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6204 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6205 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6206 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6207 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6208 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6209 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6210 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6211 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6212 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6213 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6214 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6215 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6216 { }
6217};
6218MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6219
6220static struct pci_driver ipr_driver = {
6221 .name = IPR_NAME,
6222 .id_table = ipr_pci_table,
6223 .probe = ipr_probe,
6224 .remove = ipr_remove,
6225	.shutdown = ipr_shutdown,
6226};
6227
6228/**
6229 * ipr_init - Module entry point
6230 *
6231 * Return value:
6232 * 0 on success / negative value on failure
6233 **/
6234static int __init ipr_init(void)
6235{
6236 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6237 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6238
6239 return pci_module_init(&ipr_driver);
6240}
6241
6242/**
6243 * ipr_exit - Module unload
6244 *
6245 * Module unload entry point.
6246 *
6247 * Return value:
6248 * none
6249 **/
6250static void __exit ipr_exit(void)
6251{
6252 pci_unregister_driver(&ipr_driver);
6253}
6254
6255module_init(ipr_init);
6256module_exit(ipr_exit);