/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 * Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

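/*
 * PCI vendor/device IDs bound to one of the register layouts above;
 * presumably matched against the detected adapter's PCI IDs at probe
 * time to select the correct register map for that chip.
 */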
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
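
/*
 * Illustrative module load using the parameters declared above (the
 * values here are hypothetical, not from the original source), e.g. to
 * allow U320 negotiation and raise error log verbosity:
 *
 *	modprobe ipr max_speed=2 log_level=2
 */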

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
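
/*
 * In ipr_ses_table above, the compare string selects which product-id
 * bytes are significant: an 'X' in compare_product_id_byte[] means that
 * byte must match the product_id, while any other character (the '*'
 * positions) marks a don't-care byte. See ipr_find_ses_entry() below.
 */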

/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

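	/*
	 * The trace is used as a ring buffer; this assumes trace_index is
	 * declared in ipr.h such that the post-increment wraps at the end
	 * of the trace array (otherwise this would run past the buffer).
	 */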
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

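	/*
	 * No empty-list check here: callers are assumed to hold the host
	 * lock and to guarantee that a free command block is available.
	 */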
	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
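	/* Read back to flush the posted MMIO writes above */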
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

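		/* Mid-layer ops are failed back with DID_ERROR through
		   ipr_scsi_eh_done rather than their normal done routine */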
		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

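	/* Memory barrier: make sure the IOARCB is globally visible in
	   memory before the adapter is told (via ioarrin) to fetch it */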
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

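	/* The caller holds the host lock; drop it while sleeping, since
	   the completion is signaled from interrupt context */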
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 1;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	int i;
	int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);

	if (ioa_data_len == 0)
		return;

	for (i = 0; i < ioa_data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
	}
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 *	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:	ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, the adapter
	   is now marked dead and the reload cannot succeed. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 *	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

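		/*
		 * Worked example: a 160 MB/s enclosure limit on a 16-bit
		 * wide bus gives (160 * 10) / (16 / 8) = 800, i.e. 80 MHz
		 * in the 100 kHz units described above.
		 */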
		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

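		/* Exponential backoff: double the wait each iteration */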
		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:	ioa config struct
 * @start_addr:	adapter address to dump
 * @dest:	destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
			readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:	ioa config struct
 * @pci_address:	adapter address
 * @length:	length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
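		/* Presumably yields the CPU between sections, since large
		   dumps can take a while to pull through the mailbox */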
		schedule();
	}

	return bytes_copied;
}

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}
1651
1652/**
1653 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1654 * @ioa_cfg: ioa config struct
1655 * @dump: dump struct
1656 *
1657 * Return value:
1658 * nothing
1659 **/
1660static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1661{
1662 unsigned long start_addr, sdt_word;
1663 unsigned long lock_flags = 0;
1664 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1665 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1666 u32 num_entries, start_off, end_off;
1667 u32 bytes_to_copy, bytes_copied, rc;
1668 struct ipr_sdt *sdt;
1669 int i;
1670
1671 ENTER;
1672
1673 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1674
1675 if (ioa_cfg->sdt_state != GET_DUMP) {
1676 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1677 return;
1678 }
1679
1680 start_addr = readl(ioa_cfg->ioa_mailbox);
1681
1682 if (!ipr_sdt_is_fmt2(start_addr)) {
1683 dev_err(&ioa_cfg->pdev->dev,
1684 "Invalid dump table format: %lx\n", start_addr);
1685 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1686 return;
1687 }
1688
1689 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1690
1691 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1692
1693 /* Initialize the overall dump header */
1694 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1695 driver_dump->hdr.num_entries = 1;
1696 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1697 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1698 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1699 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1700
1701 ipr_dump_version_data(ioa_cfg, driver_dump);
1702 ipr_dump_location_data(ioa_cfg, driver_dump);
1703 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1704 ipr_dump_trace_data(ioa_cfg, driver_dump);
1705
1706 /* Update dump_header */
1707 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1708
1709 /* IOA Dump entry */
1710 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1711 ioa_dump->format = IPR_SDT_FMT2;
1712 ioa_dump->hdr.len = 0;
1713 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1714 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1715
1716 /* First entries in sdt are actually a list of dump addresses and
1717 lengths to gather the real dump data. sdt represents the pointer
1718 to the ioa generated dump table. Dump data will be extracted based
1719 on entries in this table */
1720 sdt = &ioa_dump->sdt;
1721
1722 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1723 sizeof(struct ipr_sdt) / sizeof(__be32));
1724
1725 /* Smart Dump table is ready to use and the first entry is valid */
1726 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1727 dev_err(&ioa_cfg->pdev->dev,
1728 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1729 rc, be32_to_cpu(sdt->hdr.state));
1730 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1731 ioa_cfg->sdt_state = DUMP_OBTAINED;
1732 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1733 return;
1734 }
1735
1736 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1737
1738 if (num_entries > IPR_NUM_SDT_ENTRIES)
1739 num_entries = IPR_NUM_SDT_ENTRIES;
1740
1741 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1742
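	/* Walk the SDT and copy each valid region out of the adapter.
	 * The dump is truncated (marked qualified success) if it grows
	 * past IPR_MAX_IOA_DUMP_SIZE or a region copy comes up short.
	 */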
1743 for (i = 0; i < num_entries; i++) {
1744 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1745 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1746 break;
1747 }
1748
1749 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1750 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1751 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1752 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1753
1754 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1755 bytes_to_copy = end_off - start_off;
1756 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1757 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1758 continue;
1759 }
1760
1761 /* Copy data from adapter to driver buffers */
1762 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1763 bytes_to_copy);
1764
1765 ioa_dump->hdr.len += bytes_copied;
1766
1767 if (bytes_copied != bytes_to_copy) {
1768 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1769 break;
1770 }
1771 }
1772 }
1773 }
1774
1775 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1776
1777 /* Update dump_header */
1778 driver_dump->hdr.len += ioa_dump->hdr.len;
1779 wmb();
1780 ioa_cfg->sdt_state = DUMP_OBTAINED;
1781 LEAVE;
1782}
1783
1784#else
1785#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1786#endif
1787
1788/**
1789 * ipr_release_dump - Free adapter dump memory
1790 * @kref: kref struct
1791 *
1792 * Return value:
1793 * nothing
1794 **/
1795static void ipr_release_dump(struct kref *kref)
1796{
1797 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
1798 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1799 unsigned long lock_flags = 0;
1800 int i;
1801
1802 ENTER;
1803 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1804 ioa_cfg->dump = NULL;
1805 ioa_cfg->sdt_state = INACTIVE;
1806 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1807
1808 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1809 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1810
1811 kfree(dump);
1812 LEAVE;
1813}
1814
1815/**
1816 * ipr_worker_thread - Worker thread
1817 * @data: ioa config struct
1818 *
1819 * Called at task level from a work thread. This function takes care
1820 * of adding and removing devices from the mid-layer as configuration
1821 * changes are detected by the adapter.
1822 *
1823 * Return value:
1824 * nothing
1825 **/
1826static void ipr_worker_thread(void *data)
1827{
1828 unsigned long lock_flags;
1829 struct ipr_resource_entry *res;
1830 struct scsi_device *sdev;
1831 struct ipr_dump *dump;
1832 struct ipr_ioa_cfg *ioa_cfg = data;
1833 u8 bus, target, lun;
1834 int did_work;
1835
1836 ENTER;
1837 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1838
1839 if (ioa_cfg->sdt_state == GET_DUMP) {
1840 dump = ioa_cfg->dump;
1841 if (!dump) {
1842 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1843 return;
1844 }
1845 kref_get(&dump->kref);
1846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1847 ipr_get_ioa_dump(ioa_cfg, dump);
1848 kref_put(&dump->kref, ipr_release_dump);
1849
1850 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1851 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1852 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1854 return;
1855 }
1856
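	/* Device removal must be done without the host lock held, so
	 * remove one device per pass, dropping and reacquiring the lock
	 * around scsi_remove_device(), then rescan the resource list.
	 */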
1857restart:
1858 do {
1859 did_work = 0;
1860 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1861 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1862 return;
1863 }
1864
1865 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1866 if (res->del_from_ml && res->sdev) {
1867 did_work = 1;
1868 sdev = res->sdev;
1869 if (!scsi_device_get(sdev)) {
1870 res->sdev = NULL;
1871 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1872 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1873 scsi_remove_device(sdev);
1874 scsi_device_put(sdev);
1875 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1876 }
1877 break;
1878 }
1879 }
1880 } while(did_work);
1881
1882 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1883 if (res->add_to_ml) {
1884 bus = res->cfgte.res_addr.bus;
1885 target = res->cfgte.res_addr.target;
1886 lun = res->cfgte.res_addr.lun;
1887 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1888 scsi_add_device(ioa_cfg->host, bus, target, lun);
1889 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1890 goto restart;
1891 }
1892 }
1893
1894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1895 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1896 LEAVE;
1897}
1898
1899#ifdef CONFIG_SCSI_IPR_TRACE
1900/**
1901 * ipr_read_trace - Dump the adapter trace
1902 * @kobj: kobject struct
1903 * @buf: buffer
1904 * @off: offset
1905 * @count: buffer size
1906 *
1907 * Return value:
1908 * number of bytes printed to buffer
1909 **/
1910static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1911 loff_t off, size_t count)
1912{
1913 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1914 struct Scsi_Host *shost = class_to_shost(cdev);
1915 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1916 unsigned long lock_flags = 0;
1917 int size = IPR_TRACE_SIZE;
1918 char *src = (char *)ioa_cfg->trace;
1919
1920 if (off > size)
1921 return 0;
1922 if (off + count > size) {
1923 size -= off;
1924 count = size;
1925 }
1926
1927 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1928 memcpy(buf, &src[off], count);
1929 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1930 return count;
1931}
1932
1933static struct bin_attribute ipr_trace_attr = {
1934 .attr = {
1935 .name = "trace",
1936 .mode = S_IRUGO,
1937 },
1938 .size = 0,
1939 .read = ipr_read_trace,
1940};
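/* Illustrative usage from userspace (sysfs path assumed, not taken
 * from this file):
 *   dd if=/sys/class/scsi_host/host0/trace bs=4k | hexdump -C
 */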
1941#endif
1942
1943static const struct {
1944 enum ipr_cache_state state;
1945 char *name;
1946} cache_state [] = {
1947 { CACHE_NONE, "none" },
1948 { CACHE_DISABLED, "disabled" },
1949 { CACHE_ENABLED, "enabled" }
1950};
1951
1952/**
1953 * ipr_show_write_caching - Show the write caching attribute
1954 * @class_dev: class device struct
1955 * @buf: buffer
1956 *
1957 * Return value:
1958 * number of bytes printed to buffer
1959 **/
1960static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
1961{
1962 struct Scsi_Host *shost = class_to_shost(class_dev);
1963 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1964 unsigned long lock_flags = 0;
1965 int i, len = 0;
1966
1967 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1968 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
1969 if (cache_state[i].state == ioa_cfg->cache_state) {
1970 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
1971 break;
1972 }
1973 }
1974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1975 return len;
1976}
1977
1978
1979/**
1980 * ipr_store_write_caching - Enable/disable adapter write cache
1981 * @class_dev: class_device struct
1982 * @buf: buffer
1983 * @count: buffer size
1984 *
1985 * This function will enable/disable adapter write cache.
1986 *
1987 * Return value:
1988 * count on success / other on failure
1989 **/
1990static ssize_t ipr_store_write_caching(struct class_device *class_dev,
1991 const char *buf, size_t count)
1992{
1993 struct Scsi_Host *shost = class_to_shost(class_dev);
1994 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1995 unsigned long lock_flags = 0;
1996 enum ipr_cache_state new_state = CACHE_INVALID;
1997 int i;
1998
1999 if (!capable(CAP_SYS_ADMIN))
2000 return -EACCES;
2001 if (ioa_cfg->cache_state == CACHE_NONE)
2002 return -EINVAL;
2003
2004 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2005 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2006 new_state = cache_state[i].state;
2007 break;
2008 }
2009 }
2010
2011 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2012 return -EINVAL;
2013
2014 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2015 if (ioa_cfg->cache_state == new_state) {
2016 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2017 return count;
2018 }
2019
2020 ioa_cfg->cache_state = new_state;
2021 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2022 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2023 if (!ioa_cfg->in_reset_reload)
2024 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2025 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2026 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2027
2028 return count;
2029}
2030
2031static struct class_device_attribute ipr_ioa_cache_attr = {
2032 .attr = {
2033 .name = "write_cache",
2034 .mode = S_IRUGO | S_IWUSR,
2035 },
2036 .show = ipr_show_write_caching,
2037 .store = ipr_store_write_caching
2038};
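/* Illustrative usage from userspace (sysfs path assumed, not taken
 * from this file):
 *   echo disabled > /sys/class/scsi_host/host0/write_cache
 *   cat /sys/class/scsi_host/host0/write_cache
 */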
2039
2040/**
2041 * ipr_show_fw_version - Show the firmware version
2042 * @class_dev: class device struct
2043 * @buf: buffer
2044 *
2045 * Return value:
2046 * number of bytes printed to buffer
2047 **/
2048static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2049{
2050 struct Scsi_Host *shost = class_to_shost(class_dev);
2051 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2052 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2053 unsigned long lock_flags = 0;
2054 int len;
2055
2056 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2057 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2058 ucode_vpd->major_release, ucode_vpd->card_type,
2059 ucode_vpd->minor_release[0],
2060 ucode_vpd->minor_release[1]);
2061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2062 return len;
2063}
2064
2065static struct class_device_attribute ipr_fw_version_attr = {
2066 .attr = {
2067 .name = "fw_version",
2068 .mode = S_IRUGO,
2069 },
2070 .show = ipr_show_fw_version,
2071};
2072
2073/**
2074 * ipr_show_log_level - Show the adapter's error logging level
2075 * @class_dev: class device struct
2076 * @buf: buffer
2077 *
2078 * Return value:
2079 * number of bytes printed to buffer
2080 **/
2081static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2082{
2083 struct Scsi_Host *shost = class_to_shost(class_dev);
2084 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2085 unsigned long lock_flags = 0;
2086 int len;
2087
2088 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2089 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2090 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2091 return len;
2092}
2093
2094/**
2095 * ipr_store_log_level - Change the adapter's error logging level
2096 * @class_dev: class device struct
2097 * @buf: buffer
 * @count: buffer size
2098 *
2099 * Return value:
2100 * 	number of bytes consumed from buffer
2101 **/
2102static ssize_t ipr_store_log_level(struct class_device *class_dev,
2103 const char *buf, size_t count)
2104{
2105 struct Scsi_Host *shost = class_to_shost(class_dev);
2106 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2107 unsigned long lock_flags = 0;
2108
2109 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2110 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2111 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2112 return strlen(buf);
2113}
2114
2115static struct class_device_attribute ipr_log_level_attr = {
2116 .attr = {
2117 .name = "log_level",
2118 .mode = S_IRUGO | S_IWUSR,
2119 },
2120 .show = ipr_show_log_level,
2121 .store = ipr_store_log_level
2122};
2123
2124/**
2125 * ipr_store_diagnostics - IOA Diagnostics interface
2126 * @class_dev: class_device struct
2127 * @buf: buffer
2128 * @count: buffer size
2129 *
2130 * This function will reset the adapter and wait a reasonable
2131 * amount of time for any errors that the adapter might log.
2132 *
2133 * Return value:
2134 * count on success / other on failure
2135 **/
2136static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2137 const char *buf, size_t count)
2138{
2139 struct Scsi_Host *shost = class_to_shost(class_dev);
2140 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2141 unsigned long lock_flags = 0;
2142 int rc = count;
2143
2144 if (!capable(CAP_SYS_ADMIN))
2145 return -EACCES;
2146
2147 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2148 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2149 ioa_cfg->errors_logged = 0;
2150 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2151
2152 if (ioa_cfg->in_reset_reload) {
2153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2154 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2155
2156 /* Wait for a second for any errors to be logged */
2157 msleep(1000);
2158 } else {
2159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2160 return -EIO;
2161 }
2162
2163 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2164 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2165 rc = -EIO;
2166 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2167
2168 return rc;
2169}
2170
2171static struct class_device_attribute ipr_diagnostics_attr = {
2172 .attr = {
2173 .name = "run_diagnostics",
2174 .mode = S_IWUSR,
2175 },
2176 .store = ipr_store_diagnostics
2177};
2178
2179/**
2180 * ipr_store_reset_adapter - Reset the adapter
2181 * @class_dev: class_device struct
2182 * @buf: buffer
2183 * @count: buffer size
2184 *
2185 * This function will reset the adapter.
2186 *
2187 * Return value:
2188 * count on success / other on failure
2189 **/
2190static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2191 const char *buf, size_t count)
2192{
2193 struct Scsi_Host *shost = class_to_shost(class_dev);
2194 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2195 unsigned long lock_flags;
2196 int result = count;
2197
2198 if (!capable(CAP_SYS_ADMIN))
2199 return -EACCES;
2200
2201 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2202 if (!ioa_cfg->in_reset_reload)
2203 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2204 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2205 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2206
2207 return result;
2208}
2209
2210static struct class_device_attribute ipr_ioa_reset_attr = {
2211 .attr = {
2212 .name = "reset_host",
2213 .mode = S_IWUSR,
2214 },
2215 .store = ipr_store_reset_adapter
2216};
2217
2218/**
2219 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2220 * @buf_len: buffer length
2221 *
2222 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2223 * list to use for microcode download
2224 *
2225 * Return value:
2226 * pointer to sglist / NULL on failure
2227 **/
2228static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2229{
2230 int sg_size, order, bsize_elem, num_elem, i, j;
2231 struct ipr_sglist *sglist;
2232 struct scatterlist *scatterlist;
2233 struct page *page;
2234
2235 /* Get the minimum size per scatter/gather element */
2236 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2237
2238 /* Get the actual size per element */
2239 order = get_order(sg_size);
2240
2241 /* Determine the actual number of bytes per element */
2242 bsize_elem = PAGE_SIZE * (1 << order);
2243
2244 /* Determine the actual number of sg entries needed */
2245 if (buf_len % bsize_elem)
2246 num_elem = (buf_len / bsize_elem) + 1;
2247 else
2248 num_elem = buf_len / bsize_elem;
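	/* Illustrative arithmetic, assuming 4k pages and an IPR_MAX_SGLIST
	 * of 64: a 4 MB image gives sg_size = 4 MB / 63 (~66 KB), so
	 * order 5 (128 KB chunks) and num_elem = 32 sg elements. */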
2249
2250 /* Allocate a scatter/gather list for the DMA */
2251 sglist = kmalloc(sizeof(struct ipr_sglist) +
2252 (sizeof(struct scatterlist) * (num_elem - 1)),
2253 GFP_KERNEL);
2254
2255 if (sglist == NULL) {
2256 ipr_trace;
2257 return NULL;
2258 }
2259
2260 memset(sglist, 0, sizeof(struct ipr_sglist) +
2261 (sizeof(struct scatterlist) * (num_elem - 1)));
2262
2263 scatterlist = sglist->scatterlist;
2264
2265 sglist->order = order;
2266 sglist->num_sg = num_elem;
2267
2268 /* Allocate a bunch of sg elements */
2269 for (i = 0; i < num_elem; i++) {
2270 page = alloc_pages(GFP_KERNEL, order);
2271 if (!page) {
2272 ipr_trace;
2273
2274 /* Free up what we already allocated */
2275 for (j = i - 1; j >= 0; j--)
2276 __free_pages(scatterlist[j].page, order);
2277 kfree(sglist);
2278 return NULL;
2279 }
2280
2281 scatterlist[i].page = page;
2282 }
2283
2284 return sglist;
2285}
2286
2287/**
2288 * ipr_free_ucode_buffer - Frees a microcode download buffer
2289 * @sglist: scatter/gather list pointer
2290 *
2291 * Free a DMA'able ucode download buffer previously allocated with
2292 * ipr_alloc_ucode_buffer
2293 *
2294 * Return value:
2295 * nothing
2296 **/
2297static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2298{
2299 int i;
2300
2301 for (i = 0; i < sglist->num_sg; i++)
2302 __free_pages(sglist->scatterlist[i].page, sglist->order);
2303
2304 kfree(sglist);
2305}
2306
2307/**
2308 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2309 * @sglist: scatter/gather list pointer
2310 * @buffer: buffer pointer
2311 * @len: buffer length
2312 *
2313 * Copy a microcode image from a user buffer into a buffer allocated by
2314 * ipr_alloc_ucode_buffer
2315 *
2316 * Return value:
2317 * 0 on success / other on failure
2318 **/
2319static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2320 u8 *buffer, u32 len)
2321{
2322 int bsize_elem, i, result = 0;
2323 struct scatterlist *scatterlist;
2324 void *kaddr;
2325
2326 /* Determine the actual number of bytes per element */
2327 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2328
2329 scatterlist = sglist->scatterlist;
2330
2331 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2332 kaddr = kmap(scatterlist[i].page);
2333 memcpy(kaddr, buffer, bsize_elem);
2334 kunmap(scatterlist[i].page);
2335
2336 scatterlist[i].length = bsize_elem;
2337
2338 if (result != 0) {
2339 ipr_trace;
2340 return result;
2341 }
2342 }
2343
2344 if (len % bsize_elem) {
2345 kaddr = kmap(scatterlist[i].page);
2346 memcpy(kaddr, buffer, len % bsize_elem);
2347 kunmap(scatterlist[i].page);
2348
2349 scatterlist[i].length = len % bsize_elem;
2350 }
2351
2352 sglist->buffer_len = len;
2353 return result;
2354}
2355
2356/**
2357 * ipr_map_ucode_buffer - Map a microcode download buffer
2358 * @ipr_cmd: ipr command struct
2359 * @sglist: scatter/gather list
2360 * @len: total length of download buffer
2361 *
2362 * Maps a microcode download scatter/gather list for DMA and
2363 * builds the IOADL.
2364 *
2365 * Return value:
2366 * 0 on success / -EIO on failure
2367 **/
2368static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
2369 struct ipr_sglist *sglist, int len)
2370{
2371 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2372 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2373 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2374 struct scatterlist *scatterlist = sglist->scatterlist;
2375 int i;
2376
2377 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
2378 sglist->num_sg, DMA_TO_DEVICE);
2379
2380 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2381 ioarcb->write_data_transfer_length = cpu_to_be32(len);
2382 ioarcb->write_ioadl_len =
2383 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2384
2385 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2386 ioadl[i].flags_and_data_len =
2387 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2388 ioadl[i].address =
2389 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2390 }
2391
2392 if (likely(ipr_cmd->dma_use_sg)) {
2393 ioadl[i-1].flags_and_data_len |=
2394 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2395 }
2396 else {
2397 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
2398 return -EIO;
2399 }
2400
2401 return 0;
2402}
2403
2404/**
2405 * ipr_store_update_fw - Update the firmware on the adapter
2406 * @class_dev: class_device struct
2407 * @buf: buffer
2408 * @count: buffer size
2409 *
2410 * This function will update the firmware on the adapter.
2411 *
2412 * Return value:
2413 * count on success / other on failure
2414 **/
2415static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2416 const char *buf, size_t count)
2417{
2418 struct Scsi_Host *shost = class_to_shost(class_dev);
2419 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2420 struct ipr_ucode_image_header *image_hdr;
2421 const struct firmware *fw_entry;
2422 struct ipr_sglist *sglist;
2423 unsigned long lock_flags;
2424 char fname[100];
2425 char *src;
2426 int len, result, dnld_size;
2427
2428 if (!capable(CAP_SYS_ADMIN))
2429 return -EACCES;
2430
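	/* The sysfs write includes the user's trailing newline; overwrite
	 * it so fname holds the bare firmware file name. */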
2431 len = snprintf(fname, 99, "%s", buf);
2432 fname[len-1] = '\0';
2433
2434	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2435 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2436 return -EIO;
2437 }
2438
2439 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2440
2441 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2442 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2443 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2444 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2445 release_firmware(fw_entry);
2446 return -EINVAL;
2447 }
2448
2449 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2450 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2451 sglist = ipr_alloc_ucode_buffer(dnld_size);
2452
2453 if (!sglist) {
2454 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2455 release_firmware(fw_entry);
2456 return -ENOMEM;
2457 }
2458
2459 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2460
2461 if (result) {
2462 dev_err(&ioa_cfg->pdev->dev,
2463 "Microcode buffer copy to DMA buffer failed\n");
2464 ipr_free_ucode_buffer(sglist);
2465 release_firmware(fw_entry);
2466 return result;
2467 }
2468
2469 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2470
2471 if (ioa_cfg->ucode_sglist) {
2472 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2473 dev_err(&ioa_cfg->pdev->dev,
2474 "Microcode download already in progress\n");
2475 ipr_free_ucode_buffer(sglist);
2476 release_firmware(fw_entry);
2477 return -EIO;
2478 }
2479
2480 ioa_cfg->ucode_sglist = sglist;
2481 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2482 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2483 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2484
2485 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2486 ioa_cfg->ucode_sglist = NULL;
2487 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2488
2489 ipr_free_ucode_buffer(sglist);
2490 release_firmware(fw_entry);
2491
2492 return count;
2493}
2494
2495static struct class_device_attribute ipr_update_fw_attr = {
2496 .attr = {
2497 .name = "update_fw",
2498 .mode = S_IWUSR,
2499 },
2500 .store = ipr_store_update_fw
2501};
2502
2503static struct class_device_attribute *ipr_ioa_attrs[] = {
2504 &ipr_fw_version_attr,
2505 &ipr_log_level_attr,
2506 &ipr_diagnostics_attr,
2507 &ipr_ioa_reset_attr,
2508 &ipr_update_fw_attr,
2509	&ipr_ioa_cache_attr,
2510	NULL,
2511};
2512
2513#ifdef CONFIG_SCSI_IPR_DUMP
2514/**
2515 * ipr_read_dump - Dump the adapter
2516 * @kobj: kobject struct
2517 * @buf: buffer
2518 * @off: offset
2519 * @count: buffer size
2520 *
2521 * Return value:
2522 * number of bytes printed to buffer
2523 **/
2524static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2525 loff_t off, size_t count)
2526{
2527 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2528 struct Scsi_Host *shost = class_to_shost(cdev);
2529 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2530 struct ipr_dump *dump;
2531 unsigned long lock_flags = 0;
2532 char *src;
2533 int len;
2534 size_t rc = count;
2535
2536 if (!capable(CAP_SYS_ADMIN))
2537 return -EACCES;
2538
2539 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2540 dump = ioa_cfg->dump;
2541
2542 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2543 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2544 return 0;
2545 }
2546 kref_get(&dump->kref);
2547 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2548
2549 if (off > dump->driver_dump.hdr.len) {
2550 kref_put(&dump->kref, ipr_release_dump);
2551 return 0;
2552 }
2553
2554 if (off + count > dump->driver_dump.hdr.len) {
2555 count = dump->driver_dump.hdr.len - off;
2556 rc = count;
2557 }
2558
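	/* The dump reads back as three concatenated regions: the driver
	 * dump (header + entries), the IOA dump header area, then the
	 * page array holding the copied SDT data. Serve each region in
	 * turn based on where the requested offset falls. */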
2559 if (count && off < sizeof(dump->driver_dump)) {
2560 if (off + count > sizeof(dump->driver_dump))
2561 len = sizeof(dump->driver_dump) - off;
2562 else
2563 len = count;
2564 src = (u8 *)&dump->driver_dump + off;
2565 memcpy(buf, src, len);
2566 buf += len;
2567 off += len;
2568 count -= len;
2569 }
2570
2571 off -= sizeof(dump->driver_dump);
2572
2573 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2574 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2575 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2576 else
2577 len = count;
2578 src = (u8 *)&dump->ioa_dump + off;
2579 memcpy(buf, src, len);
2580 buf += len;
2581 off += len;
2582 count -= len;
2583 }
2584
2585 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2586
2587 while (count) {
2588 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2589 len = PAGE_ALIGN(off) - off;
2590 else
2591 len = count;
2592 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2593 src += off & ~PAGE_MASK;
2594 memcpy(buf, src, len);
2595 buf += len;
2596 off += len;
2597 count -= len;
2598 }
2599
2600 kref_put(&dump->kref, ipr_release_dump);
2601 return rc;
2602}
2603
2604/**
2605 * ipr_alloc_dump - Prepare for adapter dump
2606 * @ioa_cfg: ioa config struct
2607 *
2608 * Return value:
2609 * 0 on success / other on failure
2610 **/
2611static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2612{
2613 struct ipr_dump *dump;
2614 unsigned long lock_flags = 0;
2615
2616 ENTER;
2617 dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2618
2619 if (!dump) {
2620 ipr_err("Dump memory allocation failed\n");
2621 return -ENOMEM;
2622 }
2623
2624 memset(dump, 0, sizeof(struct ipr_dump));
2625 kref_init(&dump->kref);
2626 dump->ioa_cfg = ioa_cfg;
2627
2628 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2629
2630 if (INACTIVE != ioa_cfg->sdt_state) {
2631 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2632 kfree(dump);
2633 return 0;
2634 }
2635
2636 ioa_cfg->dump = dump;
2637 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2638 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2639 ioa_cfg->dump_taken = 1;
2640 schedule_work(&ioa_cfg->work_q);
2641 }
2642 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2643
2644 LEAVE;
2645 return 0;
2646}
2647
2648/**
2649 * ipr_free_dump - Free adapter dump memory
2650 * @ioa_cfg: ioa config struct
2651 *
2652 * Return value:
2653 * 0 on success / other on failure
2654 **/
2655static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2656{
2657 struct ipr_dump *dump;
2658 unsigned long lock_flags = 0;
2659
2660 ENTER;
2661
2662 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2663 dump = ioa_cfg->dump;
2664 if (!dump) {
2665 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2666 return 0;
2667 }
2668
2669 ioa_cfg->dump = NULL;
2670 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2671
2672 kref_put(&dump->kref, ipr_release_dump);
2673
2674 LEAVE;
2675 return 0;
2676}
2677
2678/**
2679 * ipr_write_dump - Setup dump state of adapter
2680 * @kobj: kobject struct
2681 * @buf: buffer
2682 * @off: offset
2683 * @count: buffer size
2684 *
2685 * Return value:
2686 * 	count on success / other on failure
2687 **/
2688static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2689 loff_t off, size_t count)
2690{
2691 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2692 struct Scsi_Host *shost = class_to_shost(cdev);
2693 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2694 int rc;
2695
2696 if (!capable(CAP_SYS_ADMIN))
2697 return -EACCES;
2698
2699 if (buf[0] == '1')
2700 rc = ipr_alloc_dump(ioa_cfg);
2701 else if (buf[0] == '0')
2702 rc = ipr_free_dump(ioa_cfg);
2703 else
2704 return -EINVAL;
2705
2706 if (rc)
2707 return rc;
2708 else
2709 return count;
2710}
2711
2712static struct bin_attribute ipr_dump_attr = {
2713 .attr = {
2714 .name = "dump",
2715 .mode = S_IRUSR | S_IWUSR,
2716 },
2717 .size = 0,
2718 .read = ipr_read_dump,
2719 .write = ipr_write_dump
2720};
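/* Illustrative usage from userspace (sysfs path assumed, not taken
 * from this file):
 *   echo 1 > /sys/class/scsi_host/host0/dump    # prepare dump buffer
 *   cat /sys/class/scsi_host/host0/dump > ioa.dump
 *   echo 0 > /sys/class/scsi_host/host0/dump    # free dump memory
 */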
2721#else
2722static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
2723#endif
2724
2725/**
2726 * ipr_change_queue_depth - Change the device's queue depth
2727 * @sdev: scsi device struct
2728 * @qdepth: depth to set
2729 *
2730 * Return value:
2731 * actual depth set
2732 **/
2733static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
2734{
2735 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2736 return sdev->queue_depth;
2737}
2738
2739/**
2740 * ipr_change_queue_type - Change the device's queue type
2741 * @sdev: scsi device struct
2742 * @tag_type: type of tags to use
2743 *
2744 * Return value:
2745 * actual queue type set
2746 **/
2747static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2748{
2749 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2750 struct ipr_resource_entry *res;
2751 unsigned long lock_flags = 0;
2752
2753 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2754 res = (struct ipr_resource_entry *)sdev->hostdata;
2755
2756 if (res) {
2757 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2758 /*
2759 * We don't bother quiescing the device here since the
2760 * adapter firmware does it for us.
2761 */
2762 scsi_set_tag_type(sdev, tag_type);
2763
2764 if (tag_type)
2765 scsi_activate_tcq(sdev, sdev->queue_depth);
2766 else
2767 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2768 } else
2769 tag_type = 0;
2770 } else
2771 tag_type = 0;
2772
2773 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2774 return tag_type;
2775}
2776
2777/**
2778 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2779 * @dev: device struct
2780 * @buf: buffer
2781 *
2782 * Return value:
2783 * number of bytes printed to buffer
2784 **/
2785static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
2786{
2787 struct scsi_device *sdev = to_scsi_device(dev);
2788 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2789 struct ipr_resource_entry *res;
2790 unsigned long lock_flags = 0;
2791 ssize_t len = -ENXIO;
2792
2793 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2794 res = (struct ipr_resource_entry *)sdev->hostdata;
2795 if (res)
2796 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2797 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2798 return len;
2799}
2800
2801static struct device_attribute ipr_adapter_handle_attr = {
2802 .attr = {
2803 .name = "adapter_handle",
2804 .mode = S_IRUSR,
2805 },
2806 .show = ipr_show_adapter_handle
2807};
2808
2809static struct device_attribute *ipr_dev_attrs[] = {
2810 &ipr_adapter_handle_attr,
2811 NULL,
2812};
2813
2814/**
2815 * ipr_biosparam - Return the HSC mapping
2816 * @sdev: scsi device struct
2817 * @block_device: block device pointer
2818 * @capacity: capacity of the device
2819 * @parm: Array containing returned HSC values.
2820 *
2821 * This function generates the HSC parms that fdisk uses.
2822 * We want to make sure we return something that places partitions
2823 * on 4k boundaries for best performance with the IOA.
2824 *
2825 * Return value:
2826 * 0 on success
2827 **/
2828static int ipr_biosparam(struct scsi_device *sdev,
2829 struct block_device *block_device,
2830 sector_t capacity, int *parm)
2831{
2832 int heads, sectors;
2833 sector_t cylinders;
2834
2835 heads = 128;
2836 sectors = 32;
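	/* 128 heads * 32 sectors = 4096 sectors (2 MB) per cylinder, so
	 * cylinder-aligned partitions start on 4k boundaries. */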
2837
2838 cylinders = capacity;
2839 sector_div(cylinders, (128 * 32));
2840
2841 /* return result */
2842 parm[0] = heads;
2843 parm[1] = sectors;
2844 parm[2] = cylinders;
2845
2846 return 0;
2847}
2848
2849/**
2850 * ipr_slave_destroy - Unconfigure a SCSI device
2851 * @sdev: scsi device struct
2852 *
2853 * Return value:
2854 * nothing
2855 **/
2856static void ipr_slave_destroy(struct scsi_device *sdev)
2857{
2858 struct ipr_resource_entry *res;
2859 struct ipr_ioa_cfg *ioa_cfg;
2860 unsigned long lock_flags = 0;
2861
2862 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2863
2864 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2865 res = (struct ipr_resource_entry *) sdev->hostdata;
2866 if (res) {
2867 sdev->hostdata = NULL;
2868 res->sdev = NULL;
2869 }
2870 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2871}
2872
2873/**
2874 * ipr_slave_configure - Configure a SCSI device
2875 * @sdev: scsi device struct
2876 *
2877 * This function configures the specified scsi device.
2878 *
2879 * Return value:
2880 * 0 on success
2881 **/
2882static int ipr_slave_configure(struct scsi_device *sdev)
2883{
2884 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2885 struct ipr_resource_entry *res;
2886 unsigned long lock_flags = 0;
2887
2888 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2889 res = sdev->hostdata;
2890 if (res) {
2891 if (ipr_is_af_dasd_device(res))
2892 sdev->type = TYPE_RAID;
2893		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
2894			sdev->scsi_level = 4;
2895			sdev->no_uld_attach = 1;
2896		}
2897		if (ipr_is_vset_device(res)) {
2898 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2899 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
2900 }
2901 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2902 sdev->allow_restart = 1;
2903 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
2904 }
2905 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2906 return 0;
2907}
2908
2909/**
2910 * ipr_slave_alloc - Prepare for commands to a device.
2911 * @sdev: scsi device struct
2912 *
2913 * This function saves a pointer to the resource entry
2914 * in the scsi device struct if the device exists. We
2915 * can then use this pointer in ipr_queuecommand when
2916 * handling new commands.
2917 *
2918 * Return value:
2919 * 	0 on success / -ENXIO if device does not exist
2920 **/
2921static int ipr_slave_alloc(struct scsi_device *sdev)
2922{
2923 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2924 struct ipr_resource_entry *res;
2925 unsigned long lock_flags;
2926	int rc = -ENXIO;
2927
2928 sdev->hostdata = NULL;
2929
2930 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2931
2932 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2933 if ((res->cfgte.res_addr.bus == sdev->channel) &&
2934 (res->cfgte.res_addr.target == sdev->id) &&
2935 (res->cfgte.res_addr.lun == sdev->lun)) {
2936 res->sdev = sdev;
2937 res->add_to_ml = 0;
2938 res->in_erp = 0;
2939 sdev->hostdata = res;
2940 res->needs_sync_complete = 1;
2941			rc = 0;
2942			break;
2943 }
2944 }
2945
2946 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2947
2948	return rc;
2949}
2950
2951/**
2952 * ipr_eh_host_reset - Reset the host adapter
2953 * @scsi_cmd: scsi command struct
2954 *
2955 * Return value:
2956 * SUCCESS / FAILED
2957 **/
2958static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
2959{
2960 struct ipr_ioa_cfg *ioa_cfg;
2961 int rc;
2962
2963 ENTER;
2964 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2965
2966 dev_err(&ioa_cfg->pdev->dev,
2967 "Adapter being reset as a result of error recovery.\n");
2968
2969 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2970 ioa_cfg->sdt_state = GET_DUMP;
2971
2972 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2973
2974 LEAVE;
2975 return rc;
2976}
2977
2978static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
2979{
2980 int rc;
2981
2982 spin_lock_irq(cmd->device->host->host_lock);
2983 rc = __ipr_eh_host_reset(cmd);
2984 spin_unlock_irq(cmd->device->host->host_lock);
2985
2986 return rc;
2987}
2988
2989/**
2990 * ipr_eh_dev_reset - Reset the device
2991 * @scsi_cmd: scsi command struct
2992 *
2993 * This function issues a device reset to the affected device.
2994 * A LUN reset will be sent to the device first. If that does
2995 * not work, a target reset will be sent.
2996 *
2997 * Return value:
2998 * SUCCESS / FAILED
2999 **/
3000static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
3001{
3002 struct ipr_cmnd *ipr_cmd;
3003 struct ipr_ioa_cfg *ioa_cfg;
3004 struct ipr_resource_entry *res;
3005 struct ipr_cmd_pkt *cmd_pkt;
3006 u32 ioasc;
3007
3008 ENTER;
3009 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3010 res = scsi_cmd->device->hostdata;
3011
3012 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3013 return FAILED;
3014
3015 /*
3016 * If we are currently going through reset/reload, return failed. This will force the
3017 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3018 * reset to complete
3019 */
3020 if (ioa_cfg->in_reset_reload)
3021 return FAILED;
3022 if (ioa_cfg->ioa_is_dead)
3023 return FAILED;
3024
3025 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3026 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3027 if (ipr_cmd->scsi_cmd)
3028 ipr_cmd->done = ipr_scsi_eh_done;
3029 }
3030 }
3031
3032 res->resetting_device = 1;
3033
3034 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3035
3036 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3037 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3038 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3039 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3040
3041 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3042 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3043
3044 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3045
3046 res->resetting_device = 0;
3047
3048 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3049
3050 LEAVE;
3051 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3052}
3053
3054static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
3055{
3056 int rc;
3057
3058 spin_lock_irq(cmd->device->host->host_lock);
3059 rc = __ipr_eh_dev_reset(cmd);
3060 spin_unlock_irq(cmd->device->host->host_lock);
3061
3062 return rc;
3063}
3064
3065/**
3066 * ipr_bus_reset_done - Op done function for bus reset.
3067 * @ipr_cmd: ipr command struct
3068 *
3069 * This function is the op done function for a bus reset
3070 *
3071 * Return value:
3072 * none
3073 **/
3074static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3075{
3076 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3077 struct ipr_resource_entry *res;
3078
3079 ENTER;
3080 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3081 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3082 sizeof(res->cfgte.res_handle))) {
3083 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3084 break;
3085 }
3086 }
3087
3088 /*
3089 * If abort has not completed, indicate the reset has, else call the
3090 * abort's done function to wake the sleeping eh thread
3091 */
3092 if (ipr_cmd->sibling->sibling)
3093 ipr_cmd->sibling->sibling = NULL;
3094 else
3095 ipr_cmd->sibling->done(ipr_cmd->sibling);
3096
3097 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3098 LEAVE;
3099}
3100
3101/**
3102 * ipr_abort_timeout - An abort task has timed out
3103 * @ipr_cmd: ipr command struct
3104 *
3105 * This function handles when an abort task times out. If this
3106 * happens we issue a bus reset since we have resources tied
3107 * up that must be freed before returning to the midlayer.
3108 *
3109 * Return value:
3110 * none
3111 **/
3112static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3113{
3114 struct ipr_cmnd *reset_cmd;
3115 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3116 struct ipr_cmd_pkt *cmd_pkt;
3117 unsigned long lock_flags = 0;
3118
3119 ENTER;
3120 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3121 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3122 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3123 return;
3124 }
3125
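	/* Link the abort and the bus reset through ->sibling so that
	 * ipr_bus_reset_done() can tell whether the abort has already
	 * completed and wake the sleeping eh thread exactly once. */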
3126 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3127 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3128 ipr_cmd->sibling = reset_cmd;
3129 reset_cmd->sibling = ipr_cmd;
3130 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3131 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3132 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3133 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3134 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3135
3136 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3137 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3138 LEAVE;
3139}
3140
3141/**
3142 * ipr_cancel_op - Cancel specified op
3143 * @scsi_cmd: scsi command struct
3144 *
3145 * This function cancels specified op.
3146 *
3147 * Return value:
3148 * SUCCESS / FAILED
3149 **/
3150static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3151{
3152 struct ipr_cmnd *ipr_cmd;
3153 struct ipr_ioa_cfg *ioa_cfg;
3154 struct ipr_resource_entry *res;
3155 struct ipr_cmd_pkt *cmd_pkt;
3156 u32 ioasc;
3157 int op_found = 0;
3158
3159 ENTER;
3160 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3161 res = scsi_cmd->device->hostdata;
3162
3163	/* If we are currently going through reset/reload, return failed.
3164	 * This will force the mid-layer to call ipr_eh_host_reset,
3165	 * which will then go to sleep and wait for the reset to complete
3166	 */
3167	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3168		return FAILED;
3169	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3170 return FAILED;
3171
3172 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3173 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3174 ipr_cmd->done = ipr_scsi_eh_done;
3175 op_found = 1;
3176 break;
3177 }
3178 }
3179
3180 if (!op_found)
3181 return SUCCESS;
3182
3183 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3184 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3185 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3186 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3187 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3188 ipr_cmd->u.sdev = scsi_cmd->device;
3189
3190 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3191 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3192 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3193
3194 /*
3195 * If the abort task timed out and we sent a bus reset, we will get
3196	 * one of the following responses to the abort
3197 */
3198 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3199 ioasc = 0;
3200 ipr_trace;
3201 }
3202
3203 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3204 res->needs_sync_complete = 1;
3205
3206 LEAVE;
3207 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3208}
3209
3210/**
3211 * ipr_eh_abort - Abort a single op
3212 * @scsi_cmd: scsi command struct
3213 *
3214 * Return value:
3215 * SUCCESS / FAILED
3216 **/
3217static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3218{
3219	unsigned long flags;
3220	int rc;
3221
3222	ENTER;
3223
3224	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3225	rc = ipr_cancel_op(scsi_cmd);
3226	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3227
3228	LEAVE;
3229	return rc;
3230}
3231
3232/**
3233 * ipr_handle_other_interrupt - Handle "other" interrupts
3234 * @ioa_cfg: ioa config struct
3235 * @int_reg: interrupt register
3236 *
3237 * Return value:
3238 * IRQ_NONE / IRQ_HANDLED
3239 **/
3240static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3241 volatile u32 int_reg)
3242{
3243 irqreturn_t rc = IRQ_HANDLED;
3244
3245 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3246 /* Mask the interrupt */
3247 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3248
3249 /* Clear the interrupt */
3250 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3251 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3252
3253 list_del(&ioa_cfg->reset_cmd->queue);
3254 del_timer(&ioa_cfg->reset_cmd->timer);
3255 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3256 } else {
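		/* Any other "other" interrupt means the adapter has failed:
		 * note a unit check so its error buffer can be fetched
		 * during recovery, log permanent failures, then mask
		 * interrupts and reset the IOA. */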
3257 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3258 ioa_cfg->ioa_unit_checked = 1;
3259 else
3260 dev_err(&ioa_cfg->pdev->dev,
3261 "Permanent IOA failure. 0x%08X\n", int_reg);
3262
3263 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3264 ioa_cfg->sdt_state = GET_DUMP;
3265
3266 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3267 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3268 }
3269
3270 return rc;
3271}
3272
3273/**
3274 * ipr_isr - Interrupt service routine
3275 * @irq: irq number
3276 * @devp: pointer to ioa config struct
3277 * @regs: pt_regs struct
3278 *
3279 * Return value:
3280 * IRQ_NONE / IRQ_HANDLED
3281 **/
3282static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3283{
3284 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3285 unsigned long lock_flags = 0;
3286 volatile u32 int_reg, int_mask_reg;
3287 u32 ioasc;
3288 u16 cmd_index;
3289 struct ipr_cmnd *ipr_cmd;
3290 irqreturn_t rc = IRQ_NONE;
3291
3292 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3293
3294 /* If interrupts are disabled, ignore the interrupt */
3295 if (!ioa_cfg->allow_interrupts) {
3296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3297 return IRQ_NONE;
3298 }
3299
3300 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3301 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3302
3303	/* If the interrupt did not come from this adapter, ignore it */
3304 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3306 return IRQ_NONE;
3307 }
3308
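	/* Drain the host request/response queue (HRRQ). An entry is valid
	 * when its toggle bit matches ours; the expected toggle flips each
	 * time the circular queue wraps, which is how stale entries are
	 * distinguished from new completions. */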
3309 while (1) {
3310 ipr_cmd = NULL;
3311
3312 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3313 ioa_cfg->toggle_bit) {
3314
3315 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3316 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3317
3318 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3319 ioa_cfg->errors_logged++;
3320 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3321
3322 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3323 ioa_cfg->sdt_state = GET_DUMP;
3324
3325 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3326 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3327 return IRQ_HANDLED;
3328 }
3329
3330 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3331
3332 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3333
3334 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3335
3336 list_del(&ipr_cmd->queue);
3337 del_timer(&ipr_cmd->timer);
3338 ipr_cmd->done(ipr_cmd);
3339
3340 rc = IRQ_HANDLED;
3341
3342 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3343 ioa_cfg->hrrq_curr++;
3344 } else {
3345 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3346 ioa_cfg->toggle_bit ^= 1u;
3347 }
3348 }
3349
3350 if (ipr_cmd != NULL) {
3351 /* Clear the PCI interrupt */
3352 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3353 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3354 } else
3355 break;
3356 }
3357
3358 if (unlikely(rc == IRQ_NONE))
3359 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3360
3361 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3362 return rc;
3363}
3364
3365/**
3366 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3367 * @ioa_cfg: ioa config struct
3368 * @ipr_cmd: ipr command struct
3369 *
3370 * Return value:
3371 * 0 on success / -1 on failure
3372 **/
3373static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3374 struct ipr_cmnd *ipr_cmd)
3375{
3376 int i;
3377 struct scatterlist *sglist;
3378 u32 length;
3379 u32 ioadl_flags = 0;
3380 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3381 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3382 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3383
3384 length = scsi_cmd->request_bufflen;
3385
3386 if (length == 0)
3387 return 0;
3388
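	/* Two mapping paths: scatter/gather commands map the whole sg list
	 * with pci_map_sg(), single-buffer commands use one pci_map_single()
	 * mapping. Either way the IOADL ends up describing the DMA address
	 * ranges for the adapter. */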
3389 if (scsi_cmd->use_sg) {
3390 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3391 scsi_cmd->request_buffer,
3392 scsi_cmd->use_sg,
3393 scsi_cmd->sc_data_direction);
3394
3395 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3396 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3397 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3398 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3399 ioarcb->write_ioadl_len =
3400 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3401 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3402 ioadl_flags = IPR_IOADL_FLAGS_READ;
3403 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3404 ioarcb->read_ioadl_len =
3405 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3406 }
3407
3408 sglist = scsi_cmd->request_buffer;
3409
3410 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3411 ioadl[i].flags_and_data_len =
3412 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3413 ioadl[i].address =
3414 cpu_to_be32(sg_dma_address(&sglist[i]));
3415 }
3416
3417 if (likely(ipr_cmd->dma_use_sg)) {
3418 ioadl[i-1].flags_and_data_len |=
3419 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3420 return 0;
3421 } else
3422 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3423 } else {
3424 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3425 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3426 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3427 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3428 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3429 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3430 ioadl_flags = IPR_IOADL_FLAGS_READ;
3431 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3432 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3433 }
3434
3435 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3436 scsi_cmd->request_buffer, length,
3437 scsi_cmd->sc_data_direction);
3438
3439 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3440 ipr_cmd->dma_use_sg = 1;
3441 ioadl[0].flags_and_data_len =
3442 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3443 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3444 return 0;
3445 } else
3446 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3447 }
3448
3449 return -1;
3450}
3451
3452/**
3453 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3454 * @scsi_cmd: scsi command struct
3455 *
3456 * Return value:
3457 * task attributes
3458 **/
3459static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3460{
3461 u8 tag[2];
3462 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3463
3464 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3465 switch (tag[0]) {
3466 case MSG_SIMPLE_TAG:
3467 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3468 break;
3469 case MSG_HEAD_TAG:
3470 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3471 break;
3472 case MSG_ORDERED_TAG:
3473 rc = IPR_FLAGS_LO_ORDERED_TASK;
3474 break;
3475		}
3476 }
3477
3478 return rc;
3479}
3480
3481/**
3482 * ipr_erp_done - Process completion of ERP for a device
3483 * @ipr_cmd: ipr command struct
3484 *
3485 * This function copies the sense buffer into the scsi_cmd
3486 * struct and calls the scsi_done function.
3487 *
3488 * Return value:
3489 * nothing
3490 **/
3491static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3492{
3493 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3494 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3495 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3496 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3497
3498 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3499 scsi_cmd->result |= (DID_ERROR << 16);
3500 ipr_sdev_err(scsi_cmd->device,
3501 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3502 } else {
3503 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3504 SCSI_SENSE_BUFFERSIZE);
3505 }
3506
3507 if (res) {
3508 res->needs_sync_complete = 1;
3509 res->in_erp = 0;
3510 }
3511 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3512 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3513 scsi_cmd->scsi_done(scsi_cmd);
3514}
3515
3516/**
3517 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3518 * @ipr_cmd: ipr command struct
3519 *
3520 * Return value:
3521 * none
3522 **/
3523static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3524{
3525 struct ipr_ioarcb *ioarcb;
3526 struct ipr_ioasa *ioasa;
3527
3528 ioarcb = &ipr_cmd->ioarcb;
3529 ioasa = &ipr_cmd->ioasa;
3530
3531 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3532 ioarcb->write_data_transfer_length = 0;
3533 ioarcb->read_data_transfer_length = 0;
3534 ioarcb->write_ioadl_len = 0;
3535 ioarcb->read_ioadl_len = 0;
3536 ioasa->ioasc = 0;
3537 ioasa->residual_data_len = 0;
3538}
3539
3540/**
3541 * ipr_erp_request_sense - Send request sense to a device
3542 * @ipr_cmd: ipr command struct
3543 *
3544 * This function sends a request sense to a device as a result
3545 * of a check condition.
3546 *
3547 * Return value:
3548 * nothing
3549 **/
3550static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3551{
3552 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3553 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3554
3555 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3556 ipr_erp_done(ipr_cmd);
3557 return;
3558 }
3559
3560 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3561
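	/* Build a REQUEST SENSE CDB by hand and point a single read IOADL
	 * entry at this command's preallocated DMA'able sense buffer. */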
3562 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3563 cmd_pkt->cdb[0] = REQUEST_SENSE;
3564 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3565 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3566 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3567 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3568
3569 ipr_cmd->ioadl[0].flags_and_data_len =
3570 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3571 ipr_cmd->ioadl[0].address =
3572 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3573
3574 ipr_cmd->ioarcb.read_ioadl_len =
3575 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3576 ipr_cmd->ioarcb.read_data_transfer_length =
3577 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3578
3579 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3580 IPR_REQUEST_SENSE_TIMEOUT * 2);
3581}
3582
3583/**
3584 * ipr_erp_cancel_all - Send cancel all to a device
3585 * @ipr_cmd: ipr command struct
3586 *
3587 * This function sends a cancel all to a device to clear the
3588 * queue. If we are running TCQ on the device, QERR is set to 1,
3589 * which means all outstanding ops have been dropped on the floor.
3590 * Cancel all will return them to us.
3591 *
3592 * Return value:
3593 * nothing
3594 **/
3595static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3596{
3597 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3598 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3599 struct ipr_cmd_pkt *cmd_pkt;
3600
3601 res->in_erp = 1;
3602
3603 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3604
3605 if (!scsi_get_tag_type(scsi_cmd->device)) {
3606 ipr_erp_request_sense(ipr_cmd);
3607 return;
3608 }
3609
3610 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3611 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3612 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3613
3614 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3615 IPR_CANCEL_ALL_TIMEOUT);
3616}
3617
3618/**
3619 * ipr_dump_ioasa - Dump contents of IOASA
3620 * @ioa_cfg: ioa config struct
3621 * @ipr_cmd: ipr command struct
3622 *
3623 * This function is invoked by the interrupt handler when ops
3624 * fail. It will log the IOASA if appropriate. Only called
3625 * for GPDD ops.
3626 *
3627 * Return value:
3628 * none
3629 **/
3630static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3631 struct ipr_cmnd *ipr_cmd)
3632{
3633 int i;
3634 u16 data_len;
3635 u32 ioasc;
3636 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3637 __be32 *ioasa_data = (__be32 *)ioasa;
3638 int error_index;
3639
3640 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3641
3642 if (0 == ioasc)
3643 return;
3644
3645 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3646 return;
3647
3648 error_index = ipr_get_error(ioasc);
3649
3650 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3651 /* Don't log an error if the IOA already logged one */
3652 if (ioasa->ilid != 0)
3653 return;
3654
3655 if (ipr_error_table[error_index].log_ioasa == 0)
3656 return;
3657 }
3658
3659 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3660 ipr_error_table[error_index].error);
3661
 3662 if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
 3663 (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3664 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3665 "Device End state: %s Phase: %s\n",
3666 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3667 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3668 }
3669
3670 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3671 data_len = sizeof(struct ipr_ioasa);
3672 else
3673 data_len = be16_to_cpu(ioasa->ret_stat_len);
3674
3675 ipr_err("IOASA Dump:\n");
3676
3677 for (i = 0; i < data_len / 4; i += 4) {
3678 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3679 be32_to_cpu(ioasa_data[i]),
3680 be32_to_cpu(ioasa_data[i+1]),
3681 be32_to_cpu(ioasa_data[i+2]),
3682 be32_to_cpu(ioasa_data[i+3]));
3683 }
3684}
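
/*
 * A minimal standalone sketch of the dump loop above (userspace C; the
 * ex_* names are illustrative, not part of the driver): i counts 32-bit
 * words and advances four per iteration, so each output line carries a
 * byte offset (i * 4) followed by four words, covering data_len bytes.
 */
#include <stdio.h>

static void ex_dump_words(const unsigned int *data, int data_len)
{
	int i;

	for (i = 0; i < data_len / 4; i += 4)
		printf("%08X: %08X %08X %08X %08X\n", i * 4,
		       data[i], data[i + 1], data[i + 2], data[i + 3]);
}

int main(void)
{
	unsigned int buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	ex_dump_words(buf, (int)sizeof(buf));	/* prints two lines */
	return 0;
}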
3685
3686/**
3687 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 3688 * @ipr_cmd: ipr command struct
3690 *
3691 * Return value:
3692 * none
3693 **/
3694static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3695{
3696 u32 failing_lba;
3697 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3698 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3699 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3700 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3701
3702 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3703
3704 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3705 return;
3706
3707 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3708
3709 if (ipr_is_vset_device(res) &&
3710 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3711 ioasa->u.vset.failing_lba_hi != 0) {
3712 sense_buf[0] = 0x72;
3713 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3714 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3715 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3716
3717 sense_buf[7] = 12;
3718 sense_buf[8] = 0;
3719 sense_buf[9] = 0x0A;
3720 sense_buf[10] = 0x80;
3721
3722 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3723
3724 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3725 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3726 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3727 sense_buf[15] = failing_lba & 0x000000ff;
3728
3729 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3730
3731 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3732 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3733 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3734 sense_buf[19] = failing_lba & 0x000000ff;
3735 } else {
3736 sense_buf[0] = 0x70;
3737 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3738 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3739 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3740
3741 /* Illegal request */
3742 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3743 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3744 sense_buf[7] = 10; /* additional length */
3745
3746 /* IOARCB was in error */
3747 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3748 sense_buf[15] = 0xC0;
3749 else /* Parameter data was invalid */
3750 sense_buf[15] = 0x80;
3751
3752 sense_buf[16] =
3753 ((IPR_FIELD_POINTER_MASK &
3754 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3755 sense_buf[17] =
3756 (IPR_FIELD_POINTER_MASK &
3757 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3758 } else {
3759 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3760 if (ipr_is_vset_device(res))
3761 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3762 else
3763 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3764
3765 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3766 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3767 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3768 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3769 sense_buf[6] = failing_lba & 0x000000ff;
3770 }
3771
3772 sense_buf[7] = 6; /* additional length */
3773 }
3774 }
3775}
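
/*
 * A minimal standalone sketch of the LBA packing above (userspace C;
 * ex_* names are illustrative): a 32-bit failing LBA is stored
 * most-significant byte first in the information field (bytes 3-6) of
 * a fixed-format (0x70) sense buffer, with the Valid bit set in byte 0.
 */
#include <assert.h>
#include <stdint.h>

static void ex_pack_failing_lba(uint8_t *sense_buf, uint32_t failing_lba)
{
	sense_buf[0] |= 0x80;	/* Valid bit: information field is set */
	sense_buf[3] = (failing_lba & 0xff000000) >> 24;
	sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
	sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
	sense_buf[6] = failing_lba & 0x000000ff;
}

int main(void)
{
	uint8_t sense[18] = { 0x70 };

	ex_pack_failing_lba(sense, 0x12345678);
	assert(sense[0] == 0xf0 && sense[3] == 0x12 && sense[6] == 0x78);
	return 0;
}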
3776
3777/**
3778 * ipr_erp_start - Process an error response for a SCSI op
3779 * @ioa_cfg: ioa config struct
3780 * @ipr_cmd: ipr command struct
3781 *
3782 * This function determines whether or not to initiate ERP
3783 * on the affected device.
3784 *
3785 * Return value:
3786 * nothing
3787 **/
3788static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3789 struct ipr_cmnd *ipr_cmd)
3790{
3791 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3792 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3793 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3794
3795 if (!res) {
3796 ipr_scsi_eh_done(ipr_cmd);
3797 return;
3798 }
3799
3800 if (ipr_is_gscsi(res))
3801 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3802 else
3803 ipr_gen_sense(ipr_cmd);
3804
3805 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3806 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3807 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3808 break;
3809 case IPR_IOASC_IR_RESOURCE_HANDLE:
3810 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3811 break;
3812 case IPR_IOASC_HW_SEL_TIMEOUT:
3813 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3814 res->needs_sync_complete = 1;
3815 break;
3816 case IPR_IOASC_SYNC_REQUIRED:
3817 if (!res->in_erp)
3818 res->needs_sync_complete = 1;
3819 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3820 break;
3821 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3822 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3823 break;
3824 case IPR_IOASC_BUS_WAS_RESET:
3825 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3826 /*
3827 * Report the bus reset and ask for a retry. The device
3828 * will give CC/UA the next command.
3829 */
3830 if (!res->resetting_device)
3831 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3832 scsi_cmd->result |= (DID_ERROR << 16);
3833 res->needs_sync_complete = 1;
3834 break;
3835 case IPR_IOASC_HW_DEV_BUS_STATUS:
3836 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
3837 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3838 ipr_erp_cancel_all(ipr_cmd);
3839 return;
3840 }
3841 res->needs_sync_complete = 1;
3842 break;
3843 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3844 break;
3845 default:
3846 scsi_cmd->result |= (DID_ERROR << 16);
3847 if (!ipr_is_vset_device(res))
3848 res->needs_sync_complete = 1;
3849 break;
3850 }
3851
3852 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3853 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3854 scsi_cmd->scsi_done(scsi_cmd);
3855}
3856
3857/**
3858 * ipr_scsi_done - mid-layer done function
3859 * @ipr_cmd: ipr command struct
3860 *
3861 * This function is invoked by the interrupt handler for
3862 * ops generated by the SCSI mid-layer
3863 *
3864 * Return value:
3865 * none
3866 **/
3867static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3868{
3869 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3870 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3871 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3872
3873 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
3874
3875 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3876 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3877 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3878 scsi_cmd->scsi_done(scsi_cmd);
3879 } else
3880 ipr_erp_start(ioa_cfg, ipr_cmd);
3881}
3882
3883/**
3884 * ipr_save_ioafp_mode_select - Save adapters mode select data
3885 * @ioa_cfg: ioa config struct
3886 * @scsi_cmd: scsi command struct
3887 *
3888 * This function saves mode select data for the adapter to
3889 * use following an adapter reset.
3890 *
3891 * Return value:
3892 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3893 **/
3894static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3895 struct scsi_cmnd *scsi_cmd)
3896{
3897 if (!ioa_cfg->saved_mode_pages) {
3898 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
3899 GFP_ATOMIC);
3900 if (!ioa_cfg->saved_mode_pages) {
3901 dev_err(&ioa_cfg->pdev->dev,
3902 "IOA mode select buffer allocation failed\n");
3903 return SCSI_MLQUEUE_HOST_BUSY;
3904 }
3905 }
3906
3907 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3908 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3909 return 0;
3910}
3911
3912/**
3913 * ipr_queuecommand - Queue a mid-layer request
3914 * @scsi_cmd: scsi command struct
3915 * @done: done function
3916 *
3917 * This function queues a request generated by the mid-layer.
3918 *
3919 * Return value:
3920 * 0 on success
3921 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3922 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3923 **/
3924static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
3925 void (*done) (struct scsi_cmnd *))
3926{
3927 struct ipr_ioa_cfg *ioa_cfg;
3928 struct ipr_resource_entry *res;
3929 struct ipr_ioarcb *ioarcb;
3930 struct ipr_cmnd *ipr_cmd;
3931 int rc = 0;
3932
3933 scsi_cmd->scsi_done = done;
3934 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3935 res = scsi_cmd->device->hostdata;
3936 scsi_cmd->result = (DID_OK << 16);
3937
3938 /*
 3939 * We are currently blocking all devices due to a host reset.
3940 * We have told the host to stop giving us new requests, but
3941 * ERP ops don't count. FIXME
3942 */
3943 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
3944 return SCSI_MLQUEUE_HOST_BUSY;
3945
3946 /*
3947 * FIXME - Create scsi_set_host_offline interface
3948 * and the ioa_is_dead check can be removed
3949 */
3950 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
3951 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3952 scsi_cmd->result = (DID_NO_CONNECT << 16);
3953 scsi_cmd->scsi_done(scsi_cmd);
3954 return 0;
3955 }
3956
3957 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3958 ioarcb = &ipr_cmd->ioarcb;
3959 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
3960
3961 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3962 ipr_cmd->scsi_cmd = scsi_cmd;
3963 ioarcb->res_handle = res->cfgte.res_handle;
3964 ipr_cmd->done = ipr_scsi_done;
3965 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
3966
3967 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
3968 if (scsi_cmd->underflow == 0)
3969 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3970
3971 if (res->needs_sync_complete) {
3972 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
3973 res->needs_sync_complete = 0;
3974 }
3975
3976 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
3977 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
3978 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
3979 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
3980 }
3981
3982 if (scsi_cmd->cmnd[0] >= 0xC0 &&
3983 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
3984 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
3985
3986 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
3987 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
3988
3989 if (likely(rc == 0))
3990 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
3991
3992 if (likely(rc == 0)) {
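 /* order the IOARCB/IOADL stores ahead of the MMIO doorbell write below */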
3993 mb();
3994 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
3995 ioa_cfg->regs.ioarrin_reg);
3996 } else {
3997 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3998 return SCSI_MLQUEUE_HOST_BUSY;
3999 }
4000
4001 return 0;
4002}
4003
4004/**
 4005 * ipr_ioa_info - Get information about the card/driver
 4006 * @host: scsi host struct
4007 *
4008 * Return value:
4009 * pointer to buffer with description string
4010 **/
4011static const char * ipr_ioa_info(struct Scsi_Host *host)
4012{
4013 static char buffer[512];
4014 struct ipr_ioa_cfg *ioa_cfg;
4015 unsigned long lock_flags = 0;
4016
4017 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4018
4019 spin_lock_irqsave(host->host_lock, lock_flags);
4020 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4021 spin_unlock_irqrestore(host->host_lock, lock_flags);
4022
4023 return buffer;
4024}
4025
4026static struct scsi_host_template driver_template = {
4027 .module = THIS_MODULE,
4028 .name = "IPR",
4029 .info = ipr_ioa_info,
4030 .queuecommand = ipr_queuecommand,
4031 .eh_abort_handler = ipr_eh_abort,
4032 .eh_device_reset_handler = ipr_eh_dev_reset,
4033 .eh_host_reset_handler = ipr_eh_host_reset,
4034 .slave_alloc = ipr_slave_alloc,
4035 .slave_configure = ipr_slave_configure,
4036 .slave_destroy = ipr_slave_destroy,
4037 .change_queue_depth = ipr_change_queue_depth,
4038 .change_queue_type = ipr_change_queue_type,
4039 .bios_param = ipr_biosparam,
4040 .can_queue = IPR_MAX_COMMANDS,
4041 .this_id = -1,
4042 .sg_tablesize = IPR_MAX_SGLIST,
4043 .max_sectors = IPR_IOA_MAX_SECTORS,
4044 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4045 .use_clustering = ENABLE_CLUSTERING,
4046 .shost_attrs = ipr_ioa_attrs,
4047 .sdev_attrs = ipr_dev_attrs,
4048 .proc_name = IPR_NAME
4049};
4050
4051#ifdef CONFIG_PPC_PSERIES
4052static const u16 ipr_blocked_processors[] = {
4053 PV_NORTHSTAR,
4054 PV_PULSAR,
4055 PV_POWER4,
4056 PV_ICESTAR,
4057 PV_SSTAR,
4058 PV_POWER4p,
4059 PV_630,
4060 PV_630p
4061};
4062
4063/**
4064 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4065 * @ioa_cfg: ioa cfg struct
4066 *
4067 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4068 * certain pSeries hardware. This function determines if the given
 4069 * adapter is in one of these configurations or not.
4070 *
4071 * Return value:
4072 * 1 if adapter is not supported / 0 if adapter is supported
4073 **/
4074static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4075{
4076 u8 rev_id;
4077 int i;
4078
4079 if (ioa_cfg->type == 0x5702) {
4080 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4081 &rev_id) == PCIBIOS_SUCCESSFUL) {
4082 if (rev_id < 4) {
 4083 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
4084 if (__is_processor(ipr_blocked_processors[i]))
4085 return 1;
4086 }
4087 }
4088 }
4089 }
4090 return 0;
4091}
4092#else
4093#define ipr_invalid_adapter(ioa_cfg) 0
4094#endif
4095
4096/**
4097 * ipr_ioa_bringdown_done - IOA bring down completion.
4098 * @ipr_cmd: ipr command struct
4099 *
4100 * This function processes the completion of an adapter bring down.
4101 * It wakes any reset sleepers.
4102 *
4103 * Return value:
4104 * IPR_RC_JOB_RETURN
4105 **/
4106static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4107{
4108 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4109
4110 ENTER;
4111 ioa_cfg->in_reset_reload = 0;
4112 ioa_cfg->reset_retries = 0;
4113 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4114 wake_up_all(&ioa_cfg->reset_wait_q);
4115
4116 spin_unlock_irq(ioa_cfg->host->host_lock);
4117 scsi_unblock_requests(ioa_cfg->host);
4118 spin_lock_irq(ioa_cfg->host->host_lock);
4119 LEAVE;
4120
4121 return IPR_RC_JOB_RETURN;
4122}
4123
4124/**
4125 * ipr_ioa_reset_done - IOA reset completion.
4126 * @ipr_cmd: ipr command struct
4127 *
4128 * This function processes the completion of an adapter reset.
4129 * It schedules any necessary mid-layer add/removes and
4130 * wakes any reset sleepers.
4131 *
4132 * Return value:
4133 * IPR_RC_JOB_RETURN
4134 **/
4135static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4136{
4137 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4138 struct ipr_resource_entry *res;
4139 struct ipr_hostrcb *hostrcb, *temp;
4140 int i = 0;
4141
4142 ENTER;
4143 ioa_cfg->in_reset_reload = 0;
4144 ioa_cfg->allow_cmds = 1;
4145 ioa_cfg->reset_cmd = NULL;
4146
4147 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4148 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4149 ipr_trace;
4150 break;
4151 }
4152 }
4153 schedule_work(&ioa_cfg->work_q);
4154
4155 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4156 list_del(&hostrcb->queue);
4157 if (i++ < IPR_NUM_LOG_HCAMS)
4158 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4159 else
4160 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4161 }
4162
4163 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4164
4165 ioa_cfg->reset_retries = 0;
4166 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4167 wake_up_all(&ioa_cfg->reset_wait_q);
4168
4169 spin_unlock_irq(ioa_cfg->host->host_lock);
4170 scsi_unblock_requests(ioa_cfg->host);
4171 spin_lock_irq(ioa_cfg->host->host_lock);
4172
4173 if (!ioa_cfg->allow_cmds)
4174 scsi_block_requests(ioa_cfg->host);
4175
4176 LEAVE;
4177 return IPR_RC_JOB_RETURN;
4178}
4179
4180/**
4181 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4182 * @supported_dev: supported device struct
4183 * @vpids: vendor product id struct
4184 *
4185 * Return value:
4186 * none
4187 **/
4188static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4189 struct ipr_std_inq_vpids *vpids)
4190{
4191 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4192 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4193 supported_dev->num_records = 1;
4194 supported_dev->data_length =
4195 cpu_to_be16(sizeof(struct ipr_supported_device));
4196 supported_dev->reserved = 0;
4197}
4198
4199/**
4200 * ipr_set_supported_devs - Send Set Supported Devices for a device
4201 * @ipr_cmd: ipr command struct
4202 *
 4203 * This function sends a Set Supported Devices command to the adapter.
4204 *
4205 * Return value:
4206 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4207 **/
4208static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4209{
4210 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4211 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4212 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4213 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4214 struct ipr_resource_entry *res = ipr_cmd->u.res;
4215
4216 ipr_cmd->job_step = ipr_ioa_reset_done;
4217
4218 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
 4219 if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
 4220 continue;
4221
4222 ipr_cmd->u.res = res;
4223 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4224
4225 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4226 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4227 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4228
4229 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4230 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4231 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4232
4233 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4234 sizeof(struct ipr_supported_device));
4235 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4236 offsetof(struct ipr_misc_cbs, supp_dev));
4237 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4238 ioarcb->write_data_transfer_length =
4239 cpu_to_be32(sizeof(struct ipr_supported_device));
4240
4241 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4242 IPR_SET_SUP_DEVICE_TIMEOUT);
4243
4244 ipr_cmd->job_step = ipr_set_supported_devs;
4245 return IPR_RC_JOB_RETURN;
4246 }
4247
4248 return IPR_RC_JOB_CONTINUE;
4249}
4250
4251/**
 4252 * ipr_setup_write_cache - Disable write cache if needed
4253 * @ipr_cmd: ipr command struct
4254 *
 4255 * This function sets up the adapter's write cache to the desired setting.
4256 *
4257 * Return value:
4258 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4259 **/
4260static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4261{
4262 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4263
4264 ipr_cmd->job_step = ipr_set_supported_devs;
4265 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4266 struct ipr_resource_entry, queue);
4267
4268 if (ioa_cfg->cache_state != CACHE_DISABLED)
4269 return IPR_RC_JOB_CONTINUE;
4270
4271 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4272 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4273 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4274 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4275
4276 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4277
4278 return IPR_RC_JOB_RETURN;
4279}
4280
4281/**
 4282 * ipr_get_mode_page - Locate specified mode page
4283 * @mode_pages: mode page buffer
4284 * @page_code: page code to find
4285 * @len: minimum required length for mode page
4286 *
4287 * Return value:
4288 * pointer to mode page / NULL on failure
4289 **/
4290static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4291 u32 page_code, u32 len)
4292{
4293 struct ipr_mode_page_hdr *mode_hdr;
4294 u32 page_length;
4295 u32 length;
4296
4297 if (!mode_pages || (mode_pages->hdr.length == 0))
4298 return NULL;
4299
4300 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4301 mode_hdr = (struct ipr_mode_page_hdr *)
4302 (mode_pages->data + mode_pages->hdr.block_desc_len);
4303
4304 while (length) {
4305 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4306 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4307 return mode_hdr;
4308 break;
4309 } else {
4310 page_length = (sizeof(struct ipr_mode_page_hdr) +
4311 mode_hdr->page_length);
4312 length -= page_length;
4313 mode_hdr = (struct ipr_mode_page_hdr *)
4314 ((unsigned long)mode_hdr + page_length);
4315 }
4316 }
4317 return NULL;
4318}
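
/*
 * A minimal standalone sketch of the traversal above (userspace C; the
 * ex_* types are illustrative): mode pages are variable-length records,
 * so the walk advances by header size plus the page_length stored in
 * each record until the requested page code is found.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct ex_page_hdr {
	uint8_t page_code;
	uint8_t page_length;		/* bytes following this header */
};

static struct ex_page_hdr *ex_find_page(uint8_t *buf, size_t len, uint8_t code)
{
	size_t off = 0;

	while (off + sizeof(struct ex_page_hdr) <= len) {
		struct ex_page_hdr *hdr = (struct ex_page_hdr *)(buf + off);

		if (hdr->page_code == code)
			return hdr;
		off += sizeof(*hdr) + hdr->page_length;
	}
	return NULL;			/* page not present */
}

int main(void)
{
	uint8_t buf[] = { 0x01, 2, 0xaa, 0xbb,	/* page 0x01, 2 bytes */
			  0x28, 1, 0xcc };	/* page 0x28, 1 byte  */

	printf("page 0x28 %sfound\n",
	       ex_find_page(buf, sizeof(buf), 0x28) ? "" : "not ");
	return 0;
}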
4319
4320/**
4321 * ipr_check_term_power - Check for term power errors
4322 * @ioa_cfg: ioa config struct
4323 * @mode_pages: IOAFP mode pages buffer
4324 *
4325 * Check the IOAFP's mode page 28 for term power errors
4326 *
4327 * Return value:
4328 * nothing
4329 **/
4330static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4331 struct ipr_mode_pages *mode_pages)
4332{
4333 int i;
4334 int entry_length;
4335 struct ipr_dev_bus_entry *bus;
4336 struct ipr_mode_page28 *mode_page;
4337
4338 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4339 sizeof(struct ipr_mode_page28));
4340
4341 entry_length = mode_page->entry_length;
4342
4343 bus = mode_page->bus;
4344
4345 for (i = 0; i < mode_page->num_entries; i++) {
4346 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4347 dev_err(&ioa_cfg->pdev->dev,
4348 "Term power is absent on scsi bus %d\n",
4349 bus->res_addr.bus);
4350 }
4351
4352 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4353 }
4354}
4355
4356/**
4357 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4358 * @ioa_cfg: ioa config struct
4359 *
4360 * Looks through the config table checking for SES devices. If
4361 * the SES device is in the SES table indicating a maximum SCSI
4362 * bus speed, the speed is limited for the bus.
4363 *
4364 * Return value:
4365 * none
4366 **/
4367static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4368{
4369 u32 max_xfer_rate;
4370 int i;
4371
4372 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4373 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4374 ioa_cfg->bus_attr[i].bus_width);
4375
4376 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4377 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4378 }
4379}
4380
4381/**
4382 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4383 * @ioa_cfg: ioa config struct
4384 * @mode_pages: mode page 28 buffer
4385 *
4386 * Updates mode page 28 based on driver configuration
4387 *
4388 * Return value:
4389 * none
4390 **/
4391static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4392 struct ipr_mode_pages *mode_pages)
4393{
4394 int i, entry_length;
4395 struct ipr_dev_bus_entry *bus;
4396 struct ipr_bus_attributes *bus_attr;
4397 struct ipr_mode_page28 *mode_page;
4398
4399 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4400 sizeof(struct ipr_mode_page28));
4401
4402 entry_length = mode_page->entry_length;
4403
4404 /* Loop for each device bus entry */
4405 for (i = 0, bus = mode_page->bus;
4406 i < mode_page->num_entries;
4407 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4408 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4409 dev_err(&ioa_cfg->pdev->dev,
4410 "Invalid resource address reported: 0x%08X\n",
4411 IPR_GET_PHYS_LOC(bus->res_addr));
4412 continue;
4413 }
4414
4415 bus_attr = &ioa_cfg->bus_attr[i];
4416 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4417 bus->bus_width = bus_attr->bus_width;
4418 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4419 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4420 if (bus_attr->qas_enabled)
4421 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4422 else
4423 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4424 }
4425}
4426
4427/**
4428 * ipr_build_mode_select - Build a mode select command
4429 * @ipr_cmd: ipr command struct
4430 * @res_handle: resource handle to send command to
 4431 * @parm: Byte 1 of the Mode Select CDB
4432 * @dma_addr: DMA buffer address
4433 * @xfer_len: data transfer length
4434 *
4435 * Return value:
4436 * none
4437 **/
4438static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4439 __be32 res_handle, u8 parm, u32 dma_addr,
4440 u8 xfer_len)
4441{
4442 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4443 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4444
4445 ioarcb->res_handle = res_handle;
4446 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4447 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4448 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4449 ioarcb->cmd_pkt.cdb[1] = parm;
4450 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4451
4452 ioadl->flags_and_data_len =
4453 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4454 ioadl->address = cpu_to_be32(dma_addr);
4455 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4456 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4457}
4458
4459/**
4460 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4461 * @ipr_cmd: ipr command struct
4462 *
4463 * This function sets up the SCSI bus attributes and sends
4464 * a Mode Select for Page 28 to activate them.
4465 *
4466 * Return value:
4467 * IPR_RC_JOB_RETURN
4468 **/
4469static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4470{
4471 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4472 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4473 int length;
4474
4475 ENTER;
4476 if (ioa_cfg->saved_mode_pages) {
4477 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4478 ioa_cfg->saved_mode_page_len);
4479 length = ioa_cfg->saved_mode_page_len;
4480 } else {
4481 ipr_scsi_bus_speed_limit(ioa_cfg);
4482 ipr_check_term_power(ioa_cfg, mode_pages);
4483 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4484 length = mode_pages->hdr.length + 1;
4485 mode_pages->hdr.length = 0;
4486 }
4487
4488 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4489 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4490 length);
4491
 4492 ipr_cmd->job_step = ipr_setup_write_cache;
 4493 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4494
4495 LEAVE;
4496 return IPR_RC_JOB_RETURN;
4497}
4498
4499/**
4500 * ipr_build_mode_sense - Builds a mode sense command
4501 * @ipr_cmd: ipr command struct
 4502 * @res_handle: resource handle to send command to
4503 * @parm: Byte 2 of mode sense command
4504 * @dma_addr: DMA address of mode sense buffer
4505 * @xfer_len: Size of DMA buffer
4506 *
4507 * Return value:
4508 * none
4509 **/
4510static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4511 __be32 res_handle,
4512 u8 parm, u32 dma_addr, u8 xfer_len)
4513{
4514 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4515 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4516
4517 ioarcb->res_handle = res_handle;
4518 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4519 ioarcb->cmd_pkt.cdb[2] = parm;
4520 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4521 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4522
4523 ioadl->flags_and_data_len =
4524 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4525 ioadl->address = cpu_to_be32(dma_addr);
4526 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4527 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4528}
4529
4530/**
4531 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4532 * @ipr_cmd: ipr command struct
4533 *
 4534 * This function sends a Page 28 mode sense to the IOA to
4535 * retrieve SCSI bus attributes.
4536 *
4537 * Return value:
4538 * IPR_RC_JOB_RETURN
4539 **/
4540static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4541{
4542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4543
4544 ENTER;
4545 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4546 0x28, ioa_cfg->vpd_cbs_dma +
4547 offsetof(struct ipr_misc_cbs, mode_pages),
4548 sizeof(struct ipr_mode_pages));
4549
4550 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4551
4552 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4553
4554 LEAVE;
4555 return IPR_RC_JOB_RETURN;
4556}
4557
4558/**
4559 * ipr_init_res_table - Initialize the resource table
4560 * @ipr_cmd: ipr command struct
4561 *
4562 * This function looks through the existing resource table, comparing
4563 * it with the config table. This function will take care of old/new
4564 * devices and schedule adding/removing them from the mid-layer
4565 * as appropriate.
4566 *
4567 * Return value:
4568 * IPR_RC_JOB_CONTINUE
4569 **/
4570static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4571{
4572 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4573 struct ipr_resource_entry *res, *temp;
4574 struct ipr_config_table_entry *cfgte;
4575 int found, i;
4576 LIST_HEAD(old_res);
4577
4578 ENTER;
4579 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4580 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4581
4582 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4583 list_move_tail(&res->queue, &old_res);
4584
4585 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4586 cfgte = &ioa_cfg->cfg_table->dev[i];
4587 found = 0;
4588
4589 list_for_each_entry_safe(res, temp, &old_res, queue) {
4590 if (!memcmp(&res->cfgte.res_addr,
4591 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4592 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4593 found = 1;
4594 break;
4595 }
4596 }
4597
4598 if (!found) {
4599 if (list_empty(&ioa_cfg->free_res_q)) {
4600 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4601 break;
4602 }
4603
4604 found = 1;
4605 res = list_entry(ioa_cfg->free_res_q.next,
4606 struct ipr_resource_entry, queue);
4607 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4608 ipr_init_res_entry(res);
4609 res->add_to_ml = 1;
4610 }
4611
4612 if (found)
4613 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4614 }
4615
4616 list_for_each_entry_safe(res, temp, &old_res, queue) {
4617 if (res->sdev) {
4618 res->del_from_ml = 1;
4619 res->sdev->hostdata = NULL;
4620 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4621 } else {
4622 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4623 }
4624 }
4625
4626 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4627
4628 LEAVE;
4629 return IPR_RC_JOB_CONTINUE;
4630}
4631
4632/**
4633 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4634 * @ipr_cmd: ipr command struct
4635 *
4636 * This function sends a Query IOA Configuration command
4637 * to the adapter to retrieve the IOA configuration table.
4638 *
4639 * Return value:
4640 * IPR_RC_JOB_RETURN
4641 **/
4642static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4643{
4644 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4645 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4646 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4647 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4648
4649 ENTER;
4650 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4651 ucode_vpd->major_release, ucode_vpd->card_type,
4652 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4653 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4654 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4655
4656 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4657 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4658 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4659
4660 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4661 ioarcb->read_data_transfer_length =
4662 cpu_to_be32(sizeof(struct ipr_config_table));
4663
4664 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4665 ioadl->flags_and_data_len =
4666 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4667
4668 ipr_cmd->job_step = ipr_init_res_table;
4669
4670 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4671
4672 LEAVE;
4673 return IPR_RC_JOB_RETURN;
4674}
4675
4676/**
4677 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 4678 * @ipr_cmd: ipr command struct
 * @flags: CDB byte 1 (e.g. the EVPD bit)
 * @page: inquiry page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
4679 *
4680 * This utility function sends an inquiry to the adapter.
4681 *
4682 * Return value:
4683 * none
4684 **/
4685static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4686 u32 dma_addr, u8 xfer_len)
4687{
4688 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4689 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4690
4691 ENTER;
4692 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4693 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4694
4695 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4696 ioarcb->cmd_pkt.cdb[1] = flags;
4697 ioarcb->cmd_pkt.cdb[2] = page;
4698 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4699
4700 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4701 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4702
4703 ioadl->address = cpu_to_be32(dma_addr);
4704 ioadl->flags_and_data_len =
4705 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4706
4707 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4708 LEAVE;
4709}
4710
4711/**
 4712 * ipr_inquiry_page_supported - Is the given inquiry page supported
4713 * @page0: inquiry page 0 buffer
4714 * @page: page code.
4715 *
4716 * This function determines if the specified inquiry page is supported.
4717 *
4718 * Return value:
4719 * 1 if page is supported / 0 if not
4720 **/
4721static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
4722{
4723 int i;
4724
4725 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
4726 if (page0->page[i] == page)
4727 return 1;
4728
4729 return 0;
4730}
4731
4732/**
 4733 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4734 * @ipr_cmd: ipr command struct
4735 *
4736 * This function sends a Page 3 inquiry to the adapter
4737 * to retrieve software VPD information.
4738 *
4739 * Return value:
4740 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4741 **/
4742static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4743{
4744 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 4745 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
4746
4747 ENTER;
4748
4749 if (!ipr_inquiry_page_supported(page0, 1))
4750 ioa_cfg->cache_state = CACHE_NONE;
4751
4752 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4753
4754 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4755 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4756 sizeof(struct ipr_inquiry_page3));
4757
4758 LEAVE;
4759 return IPR_RC_JOB_RETURN;
4760}
4761
4762/**
4763 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
4764 * @ipr_cmd: ipr command struct
4765 *
4766 * This function sends a Page 0 inquiry to the adapter
4767 * to retrieve supported inquiry pages.
4768 *
4769 * Return value:
4770 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4771 **/
4772static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
4773{
4774 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 4775 char type[5];
4776
4777 ENTER;
4778
4779 /* Grab the type out of the VPD and store it away */
4780 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4781 type[4] = '\0';
4782 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4783
 4784 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
 4785
 4786 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
4787 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
4788 sizeof(struct ipr_inquiry_page0));
 4789
4790 LEAVE;
4791 return IPR_RC_JOB_RETURN;
4792}
4793
4794/**
4795 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4796 * @ipr_cmd: ipr command struct
4797 *
4798 * This function sends a standard inquiry to the adapter.
4799 *
4800 * Return value:
4801 * IPR_RC_JOB_RETURN
4802 **/
4803static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4804{
4805 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4806
4807 ENTER;
 4808 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
 4809
4810 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4811 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4812 sizeof(struct ipr_ioa_vpd));
4813
4814 LEAVE;
4815 return IPR_RC_JOB_RETURN;
4816}
4817
4818/**
 4819 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 4820 * @ipr_cmd: ipr command struct
 4821 *
 4822 * This function sends an Identify Host Request Response Queue
4823 * command to establish the HRRQ with the adapter.
4824 *
4825 * Return value:
4826 * IPR_RC_JOB_RETURN
4827 **/
 4828static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
4829{
4830 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4831 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4832
4833 ENTER;
4834 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4835
4836 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4837 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4838
4839 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4840 ioarcb->cmd_pkt.cdb[2] =
4841 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4842 ioarcb->cmd_pkt.cdb[3] =
4843 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4844 ioarcb->cmd_pkt.cdb[4] =
4845 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4846 ioarcb->cmd_pkt.cdb[5] =
4847 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
4848 ioarcb->cmd_pkt.cdb[7] =
4849 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4850 ioarcb->cmd_pkt.cdb[8] =
4851 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4852
4853 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4854
4855 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4856
4857 LEAVE;
4858 return IPR_RC_JOB_RETURN;
4859}
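
/*
 * A minimal standalone sketch of the CDB packing above (userspace C;
 * ex_* names are illustrative): the 32-bit host RRQ DMA address goes
 * into CDB bytes 2-5 and the 16-bit queue size into bytes 7-8, both
 * most-significant byte first.
 */
#include <assert.h>
#include <stdint.h>

static void ex_pack_id_hrrq_cdb(uint8_t *cdb, uint32_t dma_addr, uint16_t len)
{
	cdb[2] = (dma_addr >> 24) & 0xff;
	cdb[3] = (dma_addr >> 16) & 0xff;
	cdb[4] = (dma_addr >> 8) & 0xff;
	cdb[5] = dma_addr & 0xff;
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;
}

int main(void)
{
	uint8_t cdb[16] = { 0 };

	ex_pack_id_hrrq_cdb(cdb, 0xdeadbeef, 0x1000);
	assert(cdb[2] == 0xde && cdb[5] == 0xef && cdb[7] == 0x10 && cdb[8] == 0);
	return 0;
}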
4860
4861/**
4862 * ipr_reset_timer_done - Adapter reset timer function
4863 * @ipr_cmd: ipr command struct
4864 *
4865 * Description: This function is used in adapter reset processing
4866 * for timing events. If the reset_cmd pointer in the IOA
 4867 * config struct is not this command, we are doing nested
4868 * resets and fail_all_ops will take care of freeing the
4869 * command block.
4870 *
4871 * Return value:
4872 * none
4873 **/
4874static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4875{
4876 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4877 unsigned long lock_flags = 0;
4878
4879 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4880
4881 if (ioa_cfg->reset_cmd == ipr_cmd) {
4882 list_del(&ipr_cmd->queue);
4883 ipr_cmd->done(ipr_cmd);
4884 }
4885
4886 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4887}
4888
4889/**
4890 * ipr_reset_start_timer - Start a timer for adapter reset job
4891 * @ipr_cmd: ipr command struct
4892 * @timeout: timeout value
4893 *
4894 * Description: This function is used in adapter reset processing
4895 * for timing events. If the reset_cmd pointer in the IOA
 4896 * config struct is not this command, we are doing nested
4897 * resets and fail_all_ops will take care of freeing the
4898 * command block.
4899 *
4900 * Return value:
4901 * none
4902 **/
4903static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4904 unsigned long timeout)
4905{
4906 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4907 ipr_cmd->done = ipr_reset_ioa_job;
4908
4909 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4910 ipr_cmd->timer.expires = jiffies + timeout;
4911 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4912 add_timer(&ipr_cmd->timer);
4913}
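
/*
 * A minimal standalone sketch of the 2.6-era timer idiom above
 * (userspace C; ex_* names are illustrative): the object pointer is
 * carried through an unsigned long `data` field and cast back inside
 * the callback, which is why ipr_reset_timer_done can be installed
 * through a function-pointer cast.
 */
#include <stdio.h>

struct ex_timer {
	unsigned long data;			/* opaque callback argument */
	void (*function)(unsigned long data);
};

struct ex_cmd {
	const char *name;
};

static void ex_timer_done(unsigned long data)
{
	struct ex_cmd *cmd = (struct ex_cmd *)data;

	printf("timer fired for %s\n", cmd->name);
}

int main(void)
{
	struct ex_cmd cmd = { .name = "reset job" };
	struct ex_timer timer = {
		.data = (unsigned long)&cmd,
		.function = ex_timer_done,
	};

	timer.function(timer.data);	/* what an expiring timer would do */
	return 0;
}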
4914
4915/**
4916 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4917 * @ioa_cfg: ioa cfg struct
4918 *
4919 * Return value:
4920 * nothing
4921 **/
4922static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4923{
4924 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4925
4926 /* Initialize Host RRQ pointers */
4927 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4928 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4929 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4930 ioa_cfg->toggle_bit = 1;
4931
4932 /* Zero out config table */
4933 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
4934}
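
/*
 * A minimal standalone sketch of the toggle-bit convention behind the
 * host RRQ pointers above (userspace C; ex_* names are illustrative and
 * the entry layout is an assumption for the demo): the producer tags
 * each entry with its current toggle value and flips it on every wrap,
 * so the consumer can distinguish fresh entries from stale ones without
 * a separate produced-count register.
 */
#include <stdio.h>

#define EX_RING_SIZE 4

static unsigned int ex_ring[EX_RING_SIZE];

static void ex_produce(unsigned int *idx, unsigned int *toggle,
		       unsigned int handle)
{
	ex_ring[*idx] = (handle << 1) | *toggle;
	if (++*idx == EX_RING_SIZE) {		/* wrap: flip the toggle */
		*idx = 0;
		*toggle ^= 1;
	}
}

int main(void)
{
	unsigned int prod = 0, prod_tog = 1, cons = 0, cons_tog = 1, h;

	for (h = 1; h <= 3; h++)
		ex_produce(&prod, &prod_tog, h);

	/* consume while the entry's toggle bit matches our expectation */
	while ((ex_ring[cons] & 1) == cons_tog) {
		printf("handle %u\n", ex_ring[cons] >> 1);
		if (++cons == EX_RING_SIZE) {
			cons = 0;
			cons_tog ^= 1;
		}
	}
	return 0;
}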
4935
4936/**
4937 * ipr_reset_enable_ioa - Enable the IOA following a reset.
4938 * @ipr_cmd: ipr command struct
4939 *
4940 * This function reinitializes some control blocks and
4941 * enables destructive diagnostics on the adapter.
4942 *
4943 * Return value:
4944 * IPR_RC_JOB_RETURN
4945 **/
4946static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
4947{
4948 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4949 volatile u32 int_reg;
4950
4951 ENTER;
 4952 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
4953 ipr_init_ioa_mem(ioa_cfg);
4954
4955 ioa_cfg->allow_interrupts = 1;
4956 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4957
4958 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4959 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
4960 ioa_cfg->regs.clr_interrupt_mask_reg);
4961 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4962 return IPR_RC_JOB_CONTINUE;
4963 }
4964
4965 /* Enable destructive diagnostics on IOA */
4966 writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
4967
4968 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
4969 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4970
4971 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
4972
4973 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4974 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
4975 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
4976 ipr_cmd->done = ipr_reset_ioa_job;
4977 add_timer(&ipr_cmd->timer);
4978 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4979
4980 LEAVE;
4981 return IPR_RC_JOB_RETURN;
4982}
4983
4984/**
4985 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4986 * @ipr_cmd: ipr command struct
4987 *
4988 * This function is invoked when an adapter dump has run out
4989 * of processing time.
4990 *
4991 * Return value:
4992 * IPR_RC_JOB_CONTINUE
4993 **/
4994static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
4995{
4996 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4997
4998 if (ioa_cfg->sdt_state == GET_DUMP)
4999 ioa_cfg->sdt_state = ABORT_DUMP;
5000
5001 ipr_cmd->job_step = ipr_reset_alert;
5002
5003 return IPR_RC_JOB_CONTINUE;
5004}
5005
5006/**
5007 * ipr_unit_check_no_data - Log a unit check/no data error log
5008 * @ioa_cfg: ioa config struct
5009 *
5010 * Logs an error indicating the adapter unit checked, but for some
5011 * reason, we were unable to fetch the unit check buffer.
5012 *
5013 * Return value:
5014 * nothing
5015 **/
5016static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5017{
5018 ioa_cfg->errors_logged++;
5019 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5020}
5021
5022/**
5023 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5024 * @ioa_cfg: ioa config struct
5025 *
5026 * Fetches the unit check buffer from the adapter by clocking the data
5027 * through the mailbox register.
5028 *
5029 * Return value:
5030 * nothing
5031 **/
5032static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5033{
5034 unsigned long mailbox;
5035 struct ipr_hostrcb *hostrcb;
5036 struct ipr_uc_sdt sdt;
5037 int rc, length;
5038
5039 mailbox = readl(ioa_cfg->ioa_mailbox);
5040
5041 if (!ipr_sdt_is_fmt2(mailbox)) {
5042 ipr_unit_check_no_data(ioa_cfg);
5043 return;
5044 }
5045
5046 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5047 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5048 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5049
5050 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5051 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5052 ipr_unit_check_no_data(ioa_cfg);
5053 return;
5054 }
5055
5056 /* Find length of the first sdt entry (UC buffer) */
5057 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5058 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5059
5060 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5061 struct ipr_hostrcb, queue);
5062 list_del(&hostrcb->queue);
5063 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5064
5065 rc = ipr_get_ldump_data_section(ioa_cfg,
5066 be32_to_cpu(sdt.entry[0].bar_str_offset),
5067 (__be32 *)&hostrcb->hcam,
5068 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5069
5070 if (!rc)
5071 ipr_handle_log_data(ioa_cfg, hostrcb);
5072 else
5073 ipr_unit_check_no_data(ioa_cfg);
5074
5075 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5076}
5077
5078/**
5079 * ipr_reset_restore_cfg_space - Restore PCI config space.
5080 * @ipr_cmd: ipr command struct
5081 *
5082 * Description: This function restores the saved PCI config space of
5083 * the adapter, fails all outstanding ops back to the callers, and
5084 * fetches the dump/unit check if applicable to this reset.
5085 *
5086 * Return value:
5087 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5088 **/
5089static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5090{
5091 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5092 int rc;
5093
5094 ENTER;
 5095 pci_unblock_user_cfg_access(ioa_cfg->pdev);
 5096 rc = pci_restore_state(ioa_cfg->pdev);
5097
5098 if (rc != PCIBIOS_SUCCESSFUL) {
5099 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5100 return IPR_RC_JOB_CONTINUE;
5101 }
5102
5103 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5104 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5105 return IPR_RC_JOB_CONTINUE;
5106 }
5107
5108 ipr_fail_all_ops(ioa_cfg);
5109
5110 if (ioa_cfg->ioa_unit_checked) {
5111 ioa_cfg->ioa_unit_checked = 0;
5112 ipr_get_unit_check_buffer(ioa_cfg);
5113 ipr_cmd->job_step = ipr_reset_alert;
5114 ipr_reset_start_timer(ipr_cmd, 0);
5115 return IPR_RC_JOB_RETURN;
5116 }
5117
5118 if (ioa_cfg->in_ioa_bringdown) {
5119 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5120 } else {
5121 ipr_cmd->job_step = ipr_reset_enable_ioa;
5122
5123 if (GET_DUMP == ioa_cfg->sdt_state) {
5124 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5125 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5126 schedule_work(&ioa_cfg->work_q);
5127 return IPR_RC_JOB_RETURN;
5128 }
5129 }
5130
 5131 LEAVE;
5132 return IPR_RC_JOB_CONTINUE;
5133}
5134
5135/**
5136 * ipr_reset_start_bist - Run BIST on the adapter.
5137 * @ipr_cmd: ipr command struct
5138 *
5139 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5140 *
5141 * Return value:
5142 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5143 **/
5144static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5145{
5146 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5147 int rc;
5148
5149 ENTER;
 5150 pci_block_user_cfg_access(ioa_cfg->pdev);
 5151 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5152
5153 if (rc != PCIBIOS_SUCCESSFUL) {
5154 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5155 rc = IPR_RC_JOB_CONTINUE;
5156 } else {
5157 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5158 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5159 rc = IPR_RC_JOB_RETURN;
5160 }
5161
5162 LEAVE;
5163 return rc;
5164}
5165
5166/**
5167 * ipr_reset_allowed - Query whether or not IOA can be reset
5168 * @ioa_cfg: ioa config struct
5169 *
5170 * Return value:
5171 * 0 if reset not allowed / non-zero if reset is allowed
5172 **/
5173static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5174{
5175 volatile u32 temp_reg;
5176
5177 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5178 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5179}
5180
5181/**
5182 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5183 * @ipr_cmd: ipr command struct
5184 *
5185 * Description: This function waits for adapter permission to run BIST,
5186 * then runs BIST. If the adapter does not give permission after a
5187 * reasonable time, we will reset the adapter anyway. The impact of
5188 * resetting the adapter without warning the adapter is the risk of
5189 * losing the persistent error log on the adapter. If the adapter is
5190 * reset while it is writing to the flash on the adapter, the flash
5191 * segment will have bad ECC and be zeroed.
5192 *
5193 * Return value:
5194 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5195 **/
5196static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5197{
5198 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5199 int rc = IPR_RC_JOB_RETURN;
5200
5201 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5202 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5203 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5204 } else {
5205 ipr_cmd->job_step = ipr_reset_start_bist;
5206 rc = IPR_RC_JOB_CONTINUE;
5207 }
5208
5209 return rc;
5210}
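
/*
 * A minimal standalone sketch of the retry-budget pattern above
 * (userspace C; ex_* names and values are illustrative): each poll
 * spends one fixed slice of the remaining budget, and the fallback
 * action runs once the condition clears or the budget hits zero.
 */
#include <stdio.h>

#define EX_SLICE 100			/* ms per poll */

static int ex_polls_until_ready = 3;	/* stand-in for the hardware bit */

static int ex_reset_allowed(void)
{
	return ex_polls_until_ready-- <= 0;
}

int main(void)
{
	int time_left = 500;		/* ms of total budget */

	while (!ex_reset_allowed() && time_left) {
		time_left -= EX_SLICE;
		printf("not yet, %d ms left\n", time_left);
	}
	printf("starting BIST (time_left=%d)\n", time_left);
	return 0;
}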
5211
5212/**
5213 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5214 * @ipr_cmd: ipr command struct
5215 *
5216 * Description: This function alerts the adapter that it will be reset.
5217 * If memory space is not currently enabled, proceed directly
5218 * to running BIST on the adapter. The timer must always be started
5219 * so we guarantee we do not run BIST from ipr_isr.
5220 *
5221 * Return value:
5222 * IPR_RC_JOB_RETURN
5223 **/
5224static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5225{
5226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5227 u16 cmd_reg;
5228 int rc;
5229
5230 ENTER;
5231 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5232
5233 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5234 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5235 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5236 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5237 } else {
5238 ipr_cmd->job_step = ipr_reset_start_bist;
5239 }
5240
5241 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5242 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5243
5244 LEAVE;
5245 return IPR_RC_JOB_RETURN;
5246}
5247
5248/**
5249 * ipr_reset_ucode_download_done - Microcode download completion
5250 * @ipr_cmd: ipr command struct
5251 *
5252 * Description: This function unmaps the microcode download buffer.
5253 *
5254 * Return value:
5255 * IPR_RC_JOB_CONTINUE
5256 **/
5257static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5258{
5259 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5260 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5261
5262 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5263 sglist->num_sg, DMA_TO_DEVICE);
5264
5265 ipr_cmd->job_step = ipr_reset_alert;
5266 return IPR_RC_JOB_CONTINUE;
5267}
5268
5269/**
5270 * ipr_reset_ucode_download - Download microcode to the adapter
5271 * @ipr_cmd: ipr command struct
5272 *
 5273 * Description: This function checks to see if there is microcode
5274 * to download to the adapter. If there is, a download is performed.
5275 *
5276 * Return value:
5277 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5278 **/
5279static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5280{
5281 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5282 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5283
5284 ENTER;
5285 ipr_cmd->job_step = ipr_reset_alert;
5286
5287 if (!sglist)
5288 return IPR_RC_JOB_CONTINUE;
5289
5290 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5291 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5292 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5293 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5294 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5295 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5296 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5297
5298 if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
5299 dev_err(&ioa_cfg->pdev->dev,
5300 "Failed to map microcode download buffer\n");
5301 return IPR_RC_JOB_CONTINUE;
5302 }
5303
5304 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5305
5306 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5307 IPR_WRITE_BUFFER_TIMEOUT);
5308
5309 LEAVE;
5310 return IPR_RC_JOB_RETURN;
5311}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else
			timeout = IPR_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd: ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	unsigned long scratch = ipr_cmd->u.scratch;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			dev_err(&ioa_cfg->pdev->dev,
				"0x%02X failed with IOASC: 0x%08X\n",
				ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->u.scratch = scratch;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
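
/*
 * Illustrative sketch, under assumed names: every reset job step routed
 * by ipr_reset_ioa_job() above follows the same contract. A step either
 * does synchronous work and returns IPR_RC_JOB_CONTINUE so the next
 * job_step runs immediately, or queues an async adapter command via
 * ipr_do_req() and returns IPR_RC_JOB_RETURN, with the job resuming at
 * the new job_step when the command completes. A minimal hypothetical
 * step would look like:
 *
 *	static int example_reset_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = example_next_step;	(runs on completion)
 *		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
 *			   IPR_INTERNAL_TIMEOUT);
 *		return IPR_RC_JOB_RETURN;
 *	}
 */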

/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @job_step: first job step of reset job
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
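
/*
 * Usage note (this pattern is taken from the callers later in this
 * file, e.g. ipr_shutdown()): a caller that must wait for the reset to
 * finish holds the host lock while initiating, drops it, then sleeps
 * on reset_wait_q:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */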

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg: ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}
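
/*
 * Note: ipr_free_cmd_blks() is deliberately safe to call on a partially
 * built list -- entries are NULL-checked and cleared, and the pool is
 * only destroyed if it was created -- which is what lets the allocation
 * path below simply call it on any mid-loop failure.
 */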

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg: ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->saved_mode_pages);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg: ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
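
/*
 * Layout sketch for illustration (the exact offsets are whatever
 * struct ipr_cmnd dictates): each command block is a single DMA-able
 * allocation, so every adapter-visible piece is addressed as a fixed
 * offset from the block's bus address:
 *
 *	dma_addr + 0					   -> ioarcb
 *	dma_addr + offsetof(struct ipr_cmnd, ioadl)	   -> read/write IOADL
 *	dma_addr + offsetof(struct ipr_cmnd, ioasa)	   -> ioasa
 *	dma_addr + offsetof(struct ipr_cmnd, sense_buffer) -> sense data
 */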

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	memset(ioa_cfg->res_entries, 0,
	       sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS);

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	memset(ioa_cfg->trace, 0,
	       sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES);

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
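
/*
 * The allocator above uses the usual kernel goto-unwind idiom: each
 * failure branches to a label that releases everything acquired so
 * far, in reverse order, so there is exactly one success path and one
 * cleanup chain. Minimal sketch of the pattern, with hypothetical
 * names:
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto out;
 *	b = alloc_b();
 *	if (!b)
 *		goto out_free_a;
 *	return 0;
 *   out_free_a:
 *	free_a(a);
 *   out:
 *	return -ENOMEM;
 */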

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg: ioa config struct
 * @host: scsi host struct
 * @pdev: PCI dev struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id: PCI device id struct
 *
 * Return value:
 *	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;
	return NULL;
}
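
/*
 * Note: dev_id->driver_data is populated from the ipr_pci_table entries
 * at the bottom of this file, each of which stores a pointer to one of
 * the ipr_chip_cfg[] descriptions; the ipr_chip[] scan above is only a
 * fallback for table entries that carry no driver_data.
 */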

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	u32 rc = PCIBIOS_SUCCESSFUL;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}
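
/*
 * For reference, the first-stage probe above runs, in order: enable the
 * PCI device, allocate the Scsi_Host, resolve the chip config, reserve
 * the PCI regions and ioremap BAR 0, set bus mastering and the 32-bit
 * DMA mask, program the cache line size, save PCI config space, set up
 * the PCI-X command register, allocate driver memory, then hook the
 * interrupt. The error labels at the bottom undo those steps in
 * reverse.
 */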

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg: ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 *	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
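
/*
 * Example of why this loop exists: a VSET target may expose, say, only
 * LUNs 2 and 5 with no LUN 0 present, so a midlayer scan that keys off
 * LUN 0 would miss it entirely; probing every (target, lun) pair up to
 * the limits above is the workaround.
 */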

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev: pci device struct
 * @dev_id: pci device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev: pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
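
/*
 * Note: the final kernel_ulong_t in each table entry above is the
 * driver_data field; it carries a pointer to the matching
 * ipr_chip_cfg[] element, which ipr_get_chip_cfg() casts back when the
 * device is probed.
 */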

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_module_init(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);