1/*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22#include <linux/config.h> /* CONFIG_PROC_FS */
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/bio.h>
27#include <linux/interrupt.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/major.h>
32#include <linux/fs.h>
33#include <linux/blkpg.h>
34#include <linux/timer.h>
35#include <linux/proc_fs.h>
36#include <linux/devfs_fs_kernel.h>
37#include <linux/init.h>
38#include <linux/hdreg.h>
39#include <linux/spinlock.h>
40#include <linux/blkdev.h>
41#include <linux/genhd.h>
42#include <asm/uaccess.h>
43#include <asm/io.h>
44
45
46#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
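/* For example, SMART2_DRIVER_VERSION(2,6,0) packs to 0x020600: the major
 * number lands in bits 23-16, the minor in bits 15-8, the subminor in bits 7-0. */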
47
48#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
49#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
50
51/* Embedded module documentation macros - see modules.h */
52/* Original author Chris Frantz - Compaq Computer Corporation */
53MODULE_AUTHOR("Compaq Computer Corporation");
54MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
55MODULE_LICENSE("GPL");
56
57#include "cpqarray.h"
58#include "ida_cmd.h"
59#include "smart1,2.h"
60#include "ida_ioctl.h"
61
62#define READ_AHEAD 128
63#define NR_CMDS 128 /* This could probably go as high as ~400 */
64
65#define MAX_CTLR 8
66#define CTLR_SHIFT 8
67
68#define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
69
70static int nr_ctlr;
71static ctlr_info_t *hba[MAX_CTLR];
72
73static int eisa[8];
74
75#define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
76
77/* board_id = Subsystem Device ID & Vendor ID
78 * product = Marketing Name for the board
79 * access = Address of the struct of function pointers
80 */
81static struct board_type products[] = {
82 { 0x0040110E, "IDA", &smart1_access },
83 { 0x0140110E, "IDA-2", &smart1_access },
84 { 0x1040110E, "IAES", &smart1_access },
85 { 0x2040110E, "SMART", &smart1_access },
86 { 0x3040110E, "SMART-2/E", &smart2e_access },
87 { 0x40300E11, "SMART-2/P", &smart2_access },
88 { 0x40310E11, "SMART-2SL", &smart2_access },
89 { 0x40320E11, "Smart Array 3200", &smart2_access },
90 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
91 { 0x40340E11, "Smart Array 221", &smart2_access },
92 { 0x40400E11, "Integrated Array", &smart4_access },
93 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
94 { 0x40500E11, "Smart Array 4200", &smart4_access },
95 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
96 { 0x40580E11, "Smart Array 431", &smart4_access },
97};
98
99/* define the PCI info for the PCI cards this driver can control */
100static const struct pci_device_id cpqarray_pci_device_id[] =
101{
102 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
103 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
104 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
105 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
106 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
107 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
108 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
109 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
110 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
111 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
112 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
113 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
114 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
115 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
116 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
117 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
118 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
119 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
120 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
121 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
122 { 0 }
123};
124
125MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
126
127static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
128
129/* Debug... */
130#define DBG(s) do { s } while(0)
131/* Debug (general info)... */
132#define DBGINFO(s) do { } while(0)
133/* Debug Paranoid... */
134#define DBGP(s) do { } while(0)
135/* Debug Extra Paranoid... */
136#define DBGPX(s) do { } while(0)
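/*
 * Note: DBG() always executes its argument, while DBGINFO(), DBGP() and
 * DBGPX() expand to empty statements here, so their bodies compile away
 * unless those macros are edited to pass the statement through like DBG().
 */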
137
138static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
139static void __iomem *remap_pci_mem(ulong base, ulong size);
140static int cpqarray_eisa_detect(void);
141static int pollcomplete(int ctlr);
142static void getgeometry(int ctlr);
143static void start_fwbk(int ctlr);
144
145static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
146static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
147
148static void free_hba(int i);
149static int alloc_cpqarray_hba(void);
150
151static int sendcmd(
152 __u8 cmd,
153 int ctlr,
154 void *buff,
155 size_t size,
156 unsigned int blk,
157 unsigned int blkcnt,
158 unsigned int log_unit );
159
160static int ida_open(struct inode *inode, struct file *filep);
161static int ida_release(struct inode *inode, struct file *filep);
162static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
163static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
164
165static void do_ida_request(request_queue_t *q);
166static void start_io(ctlr_info_t *h);
167
168static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
169static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
170static inline void complete_buffers(struct bio *bio, int ok);
171static inline void complete_command(cmdlist_t *cmd, int timeout);
172
173static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs * regs);
174static void ida_timer(unsigned long tdata);
175static int ida_revalidate(struct gendisk *disk);
176static int revalidate_allvol(ctlr_info_t *host);
177static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
178
179#ifdef CONFIG_PROC_FS
180static void ida_procinit(int i);
181static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
182#else
183static void ida_procinit(int i) {}
184#endif
185
186static inline drv_info_t *get_drv(struct gendisk *disk)
187{
188 return disk->private_data;
189}
190
191static inline ctlr_info_t *get_host(struct gendisk *disk)
192{
193 return disk->queue->queuedata;
194}
195
196
197static struct block_device_operations ida_fops = {
198 .owner = THIS_MODULE,
199 .open = ida_open,
200 .release = ida_release,
201 .ioctl = ida_ioctl,
202 .revalidate_disk= ida_revalidate,
203};
204
205
206#ifdef CONFIG_PROC_FS
207
208static struct proc_dir_entry *proc_array;
209
210/*
211 * Get us a file in /proc/driver/cpqarray that says something about each controller.
212 * Create the /proc/driver/cpqarray directory if it doesn't exist yet.
213 */
214static void __init ida_procinit(int i)
215{
216 if (proc_array == NULL) {
217 proc_array = proc_mkdir("cpqarray", proc_root_driver);
218 if (!proc_array) return;
219 }
220
221 create_proc_read_entry(hba[i]->devname, 0, proc_array,
222 ida_proc_get_info, hba[i]);
223}
224
225/*
226 * Report information about this controller.
227 */
228static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
229{
230 off_t pos = 0;
231 off_t len = 0;
232 int size, i, ctlr;
233 ctlr_info_t *h = (ctlr_info_t*)data;
234 drv_info_t *drv;
235#ifdef CPQ_PROC_PRINT_QUEUES
236 cmdlist_t *c;
237 unsigned long flags;
238#endif
239
240 ctlr = h->ctlr;
241 size = sprintf(buffer, "%s: Compaq %s Controller\n"
242 " Board ID: 0x%08lx\n"
243 " Firmware Revision: %c%c%c%c\n"
244 " Controller Sig: 0x%08lx\n"
245 " Memory Address: 0x%08lx\n"
246 " I/O Port: 0x%04x\n"
247 " IRQ: %d\n"
248 " Logical drives: %d\n"
249 " Physical drives: %d\n\n"
250 " Current Q depth: %d\n"
251 " Max Q depth since init: %d\n\n",
252 h->devname,
253 h->product_name,
254 (unsigned long)h->board_id,
255 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
256 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
257 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
258 h->log_drives, h->phys_drives,
259 h->Qdepth, h->maxQsinceinit);
260
261 pos += size; len += size;
262
263 size = sprintf(buffer+len, "Logical Drive Info:\n");
264 pos += size; len += size;
265
266 for(i=0; i<h->log_drives; i++) {
267 drv = &h->drv[i];
268 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
269 ctlr, i, drv->blk_size, drv->nr_blks);
270 pos += size; len += size;
271 }
272
273#ifdef CPQ_PROC_PRINT_QUEUES
274 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
275 size = sprintf(buffer+len, "\nCurrent Queues:\n");
276 pos += size; len += size;
277
278 c = h->reqQ;
279 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
280 if (c) c=c->next;
281 while(c && c != h->reqQ) {
282 size = sprintf(buffer+len, "->%p", c);
283 pos += size; len += size;
284 c=c->next;
285 }
286
287 c = h->cmpQ;
288 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
289 if (c) c=c->next;
290 while(c && c != h->cmpQ) {
291 size = sprintf(buffer+len, "->%p", c);
292 pos += size; len += size;
293 c=c->next;
294 }
295
296 size = sprintf(buffer+len, "\n"); pos += size; len += size;
297 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
298#endif
299 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
300 h->nr_allocs, h->nr_frees);
301 pos += size; len += size;
302
303 *eof = 1;
304 *start = buffer+offset;
305 len -= offset;
306 if (len>length)
307 len = length;
308 return len;
309}
310#endif /* CONFIG_PROC_FS */
311
312module_param_array(eisa, int, NULL, 0);
313
314static void release_io_mem(ctlr_info_t *c)
315{
316 /* if the I/O region was never reserved, do nothing */
317 if( c->io_mem_addr == 0)
318 return;
319 release_region(c->io_mem_addr, c->io_mem_length);
320 c->io_mem_addr = 0;
321 c->io_mem_length = 0;
322}
323
324static void __devexit cpqarray_remove_one(int i)
325{
326 int j;
327 char buff[4];
328
329 /* sendcmd will turn off interrupts and send the flush
330 * to write all data in the battery-backed cache to disk.
331 * No data is returned, but we don't want to pass NULL to sendcmd. */
332 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
333 {
334 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
335 i);
336 }
337 free_irq(hba[i]->intr, hba[i]);
338 iounmap(hba[i]->vaddr);
339 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
340 del_timer(&hba[i]->timer);
341 remove_proc_entry(hba[i]->devname, proc_array);
342 pci_free_consistent(hba[i]->pci_dev,
343 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
344 hba[i]->cmd_pool_dhandle);
345 kfree(hba[i]->cmd_pool_bits);
346 for(j = 0; j < NWD; j++) {
347 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
348 del_gendisk(ida_gendisk[i][j]);
349 devfs_remove("ida/c%dd%d",i,j);
350 put_disk(ida_gendisk[i][j]);
351 }
352 blk_cleanup_queue(hba[i]->queue);
353 release_io_mem(hba[i]);
354 free_hba(i);
355}
356
357static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
358{
359 int i;
360 ctlr_info_t *tmp_ptr;
361
362 if (pci_get_drvdata(pdev) == NULL) {
363 printk( KERN_ERR "cpqarray: Unable to remove device\n");
364 return;
365 }
366
367 tmp_ptr = pci_get_drvdata(pdev);
368 i = tmp_ptr->ctlr;
369 if (hba[i] == NULL) {
370 printk(KERN_ERR "cpqarray: controller %d appears to have "
371 "already been removed\n", i);
372 return;
373 }
374 pci_set_drvdata(pdev, NULL);
375
376 cpqarray_remove_one(i);
377}
378
379/* removing an instance that was not removed automatically..
380 * must be an eisa card.
381 */
382static void __devexit cpqarray_remove_one_eisa (int i)
383{
384 if (hba[i] == NULL) {
385 printk(KERN_ERR "cpqarray: controller %d appears to have "
386 "already been removed\n", i);
387 return;
388 }
389 cpqarray_remove_one(i);
390}
391
392/* pdev is NULL for eisa */
393static int cpqarray_register_ctlr( int i, struct pci_dev *pdev)
394{
395 request_queue_t *q;
396 int j;
397
398 /*
399 * register block devices
400 * Find disks and fill in structs
401 * Get an interrupt, set the Q depth and get into /proc
402 */
403
404 /* If this is successful it should ensure that we are the only */
405 /* instance of the driver */
406 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
407 goto Enomem4;
408 }
409 hba[i]->access.set_intr_mask(hba[i], 0);
410 if (request_irq(hba[i]->intr, do_ida_intr,
411 SA_INTERRUPT|SA_SHIRQ|SA_SAMPLE_RANDOM,
412 hba[i]->devname, hba[i]))
413 {
414 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
415 hba[i]->intr, hba[i]->devname);
416 goto Enomem3;
417 }
418
419 for (j=0; j<NWD; j++) {
420 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
421 if (!ida_gendisk[i][j])
422 goto Enomem2;
423 }
424
425 hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
426 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
427 &(hba[i]->cmd_pool_dhandle));
428 hba[i]->cmd_pool_bits = kmalloc(
429 ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
430 GFP_KERNEL);
431
432 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
433 goto Enomem1;
434
435 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
436 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
437 printk(KERN_INFO "cpqarray: Finding drives on %s",
438 hba[i]->devname);
439
440 spin_lock_init(&hba[i]->lock);
441 q = blk_init_queue(do_ida_request, &hba[i]->lock);
442 if (!q)
443 goto Enomem1;
444
445 hba[i]->queue = q;
446 q->queuedata = hba[i];
447
448 getgeometry(i);
449 start_fwbk(i);
450
451 ida_procinit(i);
452
453 if (pdev)
454 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
455
456 /* This is a hardware imposed limit. */
457 blk_queue_max_hw_segments(q, SG_MAX);
458
459 /* This is a driver limit and could be eliminated. */
460 blk_queue_max_phys_segments(q, SG_MAX);
461
462 init_timer(&hba[i]->timer);
463 hba[i]->timer.expires = jiffies + IDA_TIMER;
464 hba[i]->timer.data = (unsigned long)hba[i];
465 hba[i]->timer.function = ida_timer;
466 add_timer(&hba[i]->timer);
467
468 /* Enable IRQ now that spinlock and rate limit timer are set up */
469 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
470
471 for(j=0; j<NWD; j++) {
472 struct gendisk *disk = ida_gendisk[i][j];
473 drv_info_t *drv = &hba[i]->drv[j];
474 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
475 disk->major = COMPAQ_SMART2_MAJOR + i;
476 disk->first_minor = j<<NWD_SHIFT;
477 disk->fops = &ida_fops;
478 if (j && !drv->nr_blks)
479 continue;
480 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
481 set_capacity(disk, drv->nr_blks);
482 disk->queue = hba[i]->queue;
483 disk->private_data = drv;
484 add_disk(disk);
485 }
486
487 /* done ! */
488 return(i);
489
490Enomem1:
491 nr_ctlr = i;
492 kfree(hba[i]->cmd_pool_bits);
493 if (hba[i]->cmd_pool)
494 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
495 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
496Enomem2:
497 while (j--) {
498 put_disk(ida_gendisk[i][j]);
499 ida_gendisk[i][j] = NULL;
500 }
501 free_irq(hba[i]->intr, hba[i]);
502Enomem3:
503 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
504Enomem4:
505 if (pdev)
506 pci_set_drvdata(pdev, NULL);
507 release_io_mem(hba[i]);
508 free_hba(i);
509
510 printk( KERN_ERR "cpqarray: out of memory\n");
511
512 return -1;
513}
514
515static int __init cpqarray_init_one( struct pci_dev *pdev,
516 const struct pci_device_id *ent)
517{
518 int i;
519
520 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
521 " bus %d dev %d func %d\n",
522 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
523 PCI_FUNC(pdev->devfn));
524 i = alloc_cpqarray_hba();
525 if( i < 0 )
526 return (-1);
527 memset(hba[i], 0, sizeof(ctlr_info_t));
528 sprintf(hba[i]->devname, "ida%d", i);
529 hba[i]->ctlr = i;
530 /* Initialize the pdev driver private data */
531 pci_set_drvdata(pdev, hba[i]);
532
533 if (cpqarray_pci_init(hba[i], pdev) != 0) {
534 pci_set_drvdata(pdev, NULL);
535 release_io_mem(hba[i]);
536 free_hba(i);
537 return -1;
538 }
539
540 return (cpqarray_register_ctlr(i, pdev));
541}
542
543static struct pci_driver cpqarray_pci_driver = {
544 .name = "cpqarray",
545 .probe = cpqarray_init_one,
546 .remove = __devexit_p(cpqarray_remove_one_pci),
547 .id_table = cpqarray_pci_device_id,
548};
549
550/*
551 * This is it. Find all the controllers and register them.
552 * Returns the number of controllers registered.
553 */
554static int __init cpqarray_init(void)
555{
556 int num_cntlrs_reg = 0;
557 int i;
558 int rc = 0;
559
560 /* detect controllers */
561 printk(DRIVER_NAME "\n");
562
563 rc = pci_register_driver(&cpqarray_pci_driver);
564 if (rc)
565 return rc;
566 cpqarray_eisa_detect();
567
568 for (i=0; i < MAX_CTLR; i++) {
569 if (hba[i] != NULL)
570 num_cntlrs_reg++;
571 }
572
573 return(num_cntlrs_reg);
574}
575
576/* Function to find the first free pointer into our hba[] array */
577/* Returns -1 if no free entries are left. */
578static int alloc_cpqarray_hba(void)
579{
580 int i;
581
582 for(i=0; i< MAX_CTLR; i++) {
583 if (hba[i] == NULL) {
584 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
585 if(hba[i]==NULL) {
586 printk(KERN_ERR "cpqarray: out of memory.\n");
587 return (-1);
588 }
589 return (i);
590 }
591 }
592 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
593 " of 8 controllers.\n");
594 return(-1);
595}
596
597static void free_hba(int i)
598{
599 kfree(hba[i]);
600 hba[i]=NULL;
601}
602
603/*
604 * Find the IO address of the controller, its IRQ and so forth. Fill
605 * in some basic stuff into the ctlr_info_t structure.
606 */
607static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
608{
609 ushort vendor_id, device_id, command;
610 unchar cache_line_size, latency_timer;
611 unchar irq, revision;
612 unsigned long addr[6];
613 __u32 board_id;
614
615 int i;
616
617 c->pci_dev = pdev;
618 if (pci_enable_device(pdev)) {
619 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
620 return -1;
621 }
622 vendor_id = pdev->vendor;
623 device_id = pdev->device;
624 irq = pdev->irq;
625
626 for(i=0; i<6; i++)
627 addr[i] = pci_resource_start(pdev, i);
628
629 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
630 {
631 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
632 return -1;
633 }
634
635 pci_read_config_word(pdev, PCI_COMMAND, &command);
636 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
637 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
638 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
639
640 pci_read_config_dword(pdev, 0x2c, &board_id);
641
642 /* check to see if controller has been disabled */
643 if(!(command & 0x02)) {
644 printk(KERN_WARNING
645 "cpqarray: controller appears to be disabled\n");
646 return(-1);
647 }
648
649DBGINFO(
650 printk("vendor_id = %x\n", vendor_id);
651 printk("device_id = %x\n", device_id);
652 printk("command = %x\n", command);
653 for(i=0; i<6; i++)
654 printk("addr[%d] = %lx\n", i, addr[i]);
655 printk("revision = %x\n", revision);
656 printk("irq = %x\n", irq);
657 printk("cache_line_size = %x\n", cache_line_size);
658 printk("latency_timer = %x\n", latency_timer);
659 printk("board_id = %x\n", board_id);
660);
661
662 c->intr = irq;
663
664 for(i=0; i<6; i++) {
665 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
666 { /* IO space */
667 c->io_mem_addr = addr[i];
668 c->io_mem_length = pci_resource_end(pdev, i)
669 - pci_resource_start(pdev, i) + 1;
670 if(!request_region( c->io_mem_addr, c->io_mem_length,
671 "cpqarray"))
672 {
673 printk( KERN_WARNING "cpqarray: I/O memory range already in use, addr = %lx, length = %ld\n", c->io_mem_addr, c->io_mem_length);
674 c->io_mem_addr = 0;
675 c->io_mem_length = 0;
676 }
677 break;
678 }
679 }
680
681 c->paddr = 0;
682 for(i=0; i<6; i++)
683 if (!(pci_resource_flags(pdev, i) &
684 PCI_BASE_ADDRESS_SPACE_IO)) {
685 c->paddr = pci_resource_start (pdev, i);
686 break;
687 }
688 if (!c->paddr)
689 return -1;
690 c->vaddr = remap_pci_mem(c->paddr, 128);
691 if (!c->vaddr)
692 return -1;
693 c->board_id = board_id;
694
695 for(i=0; i<NR_PRODUCTS; i++) {
696 if (board_id == products[i].board_id) {
697 c->product_name = products[i].product_name;
698 c->access = *(products[i].access);
699 break;
700 }
701 }
702 if (i == NR_PRODUCTS) {
703 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
704 " to access the SMART Array controller %08lx\n",
705 (unsigned long)board_id);
706 return -1;
707 }
708
709 return 0;
710}
711
712/*
713 * Map (physical) PCI mem into (virtual) kernel space
714 */
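/*
 * The mapping is set up from the page boundary below 'base', and the offset
 * into that page is added back onto the returned pointer, so whole pages are
 * mapped but the result still points at 'base' itself.
 */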
715static void __iomem *remap_pci_mem(ulong base, ulong size)
716{
717 ulong page_base = ((ulong) base) & PAGE_MASK;
718 ulong page_offs = ((ulong) base) - page_base;
719 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
720
721 return (page_remapped ? (page_remapped + page_offs) : NULL);
722}
723
724#ifndef MODULE
725/*
726 * Config string is a comma separated set of i/o addresses of EISA cards.
727 */
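/*
 * For example, booting with "smart2=0x4000,0x5000" (addresses purely
 * illustrative) fills eisa[0] and eisa[1], and cpqarray_eisa_detect()
 * then probes for a controller at each of those I/O addresses.
 */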
728static int cpqarray_setup(char *str)
729{
730 int i, ints[9];
731
732 (void)get_options(str, ARRAY_SIZE(ints), ints);
733
734 for(i=0; i<ints[0] && i<8; i++)
735 eisa[i] = ints[i+1];
736 return 1;
737}
738
739__setup("smart2=", cpqarray_setup);
740
741#endif
742
743/*
744 * Find an EISA controller's signature. Set up an hba if we find it.
745 */
746static int cpqarray_eisa_detect(void)
747{
748 int i=0, j;
749 __u32 board_id;
750 int intr;
751 int ctlr;
752 int num_ctlr = 0;
753
754 while(i<8 && eisa[i]) {
755 ctlr = alloc_cpqarray_hba();
756 if(ctlr == -1)
757 break;
758 board_id = inl(eisa[i]+0xC80);
759 for(j=0; j < NR_PRODUCTS; j++)
760 if (board_id == products[j].board_id)
761 break;
762
763 if (j == NR_PRODUCTS) {
764 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
765 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
766 continue;
767 }
768
769 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
770 hba[ctlr]->io_mem_addr = eisa[i];
771 hba[ctlr]->io_mem_length = 0x7FF;
772 if(!request_region(hba[ctlr]->io_mem_addr,
773 hba[ctlr]->io_mem_length,
774 "cpqarray"))
775 {
776 printk(KERN_WARNING "cpqarray: I/O range already in "
777 "use addr = %lx length = %ld\n",
778 hba[ctlr]->io_mem_addr,
779 hba[ctlr]->io_mem_length);
780 free_hba(ctlr);
781 continue;
782 }
783
784 /*
785 * Read the config register to find our interrupt
786 */
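 /*
  * The IRQ is encoded in the upper nibble of the config register; after
  * the shift, bit 0 selects IRQ 11, bit 1 IRQ 10, bit 2 IRQ 14, bit 3 IRQ 15.
  */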
787 intr = inb(eisa[i]+0xCC0) >> 4;
788 if (intr & 1) intr = 11;
789 else if (intr & 2) intr = 10;
790 else if (intr & 4) intr = 14;
791 else if (intr & 8) intr = 15;
792
793 hba[ctlr]->intr = intr;
794 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
795 hba[ctlr]->product_name = products[j].product_name;
796 hba[ctlr]->access = *(products[j].access);
797 hba[ctlr]->ctlr = ctlr;
798 hba[ctlr]->board_id = board_id;
799 hba[ctlr]->pci_dev = NULL; /* not PCI */
800
801DBGINFO(
802 printk("i = %d, j = %d\n", i, j);
803 printk("irq = %x\n", intr);
804 printk("product name = %s\n", products[j].product_name);
805 printk("board_id = %x\n", board_id);
806);
807
808 num_ctlr++;
809 i++;
810
811 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
812 printk(KERN_WARNING
813 "cpqarray: Can't register EISA controller %d\n",
814 ctlr);
815
816 }
817
818 return num_ctlr;
819}
820
821/*
822 * Open. Make sure the device is really there.
823 */
824static int ida_open(struct inode *inode, struct file *filep)
825{
826 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
827 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
828
829 DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
830 /*
831 * Root is allowed to open raw volume zero even if it's not configured
832 * so array config can still work. I don't think I really like this,
833 * but I'm already using way too many device nodes to claim another one
834 * for "raw controller".
835 */
836 if (!drv->nr_blks) {
837 if (!capable(CAP_SYS_RAWIO))
838 return -ENXIO;
839 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
840 return -ENXIO;
841 }
842 host->usage_count++;
843 return 0;
844}
845
846/*
847 * Close. Sync first.
848 */
849static int ida_release(struct inode *inode, struct file *filep)
850{
851 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
852 host->usage_count--;
853 return 0;
854}
855
856/*
857 * Enqueuing and dequeuing functions for cmdlists.
858 */
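/*
 * reqQ and cmpQ are circular doubly linked lists of cmdlist_t; *Qptr points
 * at the head and addQ() links new commands in just before it, i.e. onto the
 * tail, while removeQ() unlinks a command and sets *Qptr to NULL once the
 * ring is empty.
 */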
859static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
860{
861 if (*Qptr == NULL) {
862 *Qptr = c;
863 c->next = c->prev = c;
864 } else {
865 c->prev = (*Qptr)->prev;
866 c->next = (*Qptr);
867 (*Qptr)->prev->next = c;
868 (*Qptr)->prev = c;
869 }
870}
871
872static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
873{
874 if (c && c->next != c) {
875 if (*Qptr == c) *Qptr = c->next;
876 c->prev->next = c->next;
877 c->next->prev = c->prev;
878 } else {
879 *Qptr = NULL;
880 }
881 return c;
882}
883
884/*
885 * Get a request and submit it to the controller.
886 * This routine needs to grab all the requests it possibly can from the
887 * req Q and submit them. Interrupts are off (and need to be off) when you
888 * are in here (either via the dummy do_ida_request functions or by being
889 * called from the interrupt handler
890 */
891static void do_ida_request(request_queue_t *q)
892{
893 ctlr_info_t *h = q->queuedata;
894 cmdlist_t *c;
895 struct request *creq;
896 struct scatterlist tmp_sg[SG_MAX];
897 int i, dir, seg;
898
899 if (blk_queue_plugged(q))
900 goto startio;
901
902queue_next:
903 creq = elv_next_request(q);
904 if (!creq)
905 goto startio;
906
907 if (creq->nr_phys_segments > SG_MAX)
908 BUG();
909
910 if ((c = cmd_alloc(h,1)) == NULL)
911 goto startio;
912
913 blkdev_dequeue_request(creq);
914
915 c->ctlr = h->ctlr;
916 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
917 c->hdr.size = sizeof(rblk_t) >> 2;
918 c->size += sizeof(rblk_t);
919
920 c->req.hdr.blk = creq->sector;
921 c->rq = creq;
922DBGPX(
923 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
924);
925 seg = blk_rq_map_sg(q, creq, tmp_sg);
926
927 /* Now do all the DMA Mappings */
928 if (rq_data_dir(creq) == READ)
929 dir = PCI_DMA_FROMDEVICE;
930 else
931 dir = PCI_DMA_TODEVICE;
932 for( i=0; i < seg; i++)
933 {
934 c->req.sg[i].size = tmp_sg[i].length;
935 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
936 tmp_sg[i].page,
937 tmp_sg[i].offset,
938 tmp_sg[i].length, dir);
939 }
940DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
941 c->req.hdr.sg_cnt = seg;
942 c->req.hdr.blk_cnt = creq->nr_sectors;
943 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
944 c->type = CMD_RWREQ;
945
946 /* Put the request on the tail of the request queue */
947 addQ(&h->reqQ, c);
948 h->Qdepth++;
949 if (h->Qdepth > h->maxQsinceinit)
950 h->maxQsinceinit = h->Qdepth;
951
952 goto queue_next;
953
954startio:
955 start_io(h);
956}
957
958/*
959 * start_io submits everything on a controller's request queue
960 * and moves it to the completion queue.
961 *
962 * Interrupts had better be off if you're in here
963 */
964static void start_io(ctlr_info_t *h)
965{
966 cmdlist_t *c;
967
968 while((c = h->reqQ) != NULL) {
969 /* Can't do anything if we're busy */
970 if (h->access.fifo_full(h) == 0)
971 return;
972
973 /* Get the first entry from the request Q */
974 removeQ(&h->reqQ, c);
975 h->Qdepth--;
976
977 /* Tell the controller to do our bidding */
978 h->access.submit_command(h, c);
979
980 /* Get onto the completion Q */
981 addQ(&h->cmpQ, c);
982 }
983}
984
985static inline void complete_buffers(struct bio *bio, int ok)
986{
987 struct bio *xbh;
988 while(bio) {
989 int nr_sectors = bio_sectors(bio);
990
991 xbh = bio->bi_next;
992 bio->bi_next = NULL;
993
994 blk_finished_io(nr_sectors);
995 bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
996
997 bio = xbh;
998 }
999}
1000/*
1001 * Mark all buffers that cmd was responsible for
1002 */
1003static inline void complete_command(cmdlist_t *cmd, int timeout)
1004{
1005 int ok=1;
1006 int i, ddir;
1007
1008 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1009 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1010 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1011 cmd->ctlr, cmd->hdr.unit);
1012 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1013 }
1014 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1015 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1016 cmd->ctlr, cmd->hdr.unit);
1017 ok = 0;
1018 }
1019 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1020 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1021 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1022 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1023 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1024 ok = 0;
1025 }
1026 if (timeout) ok = 0;
1027 /* unmap the DMA mapping for all the scatter gather elements */
1028 if (cmd->req.hdr.cmd == IDA_READ)
1029 ddir = PCI_DMA_FROMDEVICE;
1030 else
1031 ddir = PCI_DMA_TODEVICE;
1032 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1033 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1034 cmd->req.sg[i].size, ddir);
1035
1036 complete_buffers(cmd->rq->bio, ok);
1037
1038 DBGPX(printk("Done with %p\n", cmd->rq););
1039 end_that_request_last(cmd->rq);
1040}
1041
1042/*
1043 * The controller will interrupt us upon completion of commands.
1044 * Find the command on the completion queue, remove it, tell the OS and
1045 * try to queue up more IO
1046 */
1047static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
1048{
1049 ctlr_info_t *h = dev_id;
1050 cmdlist_t *c;
1051 unsigned long istat;
1052 unsigned long flags;
1053 __u32 a,a1;
1054
1055 istat = h->access.intr_pending(h);
1056 /* Is this interrupt for us? */
1057 if (istat == 0)
1058 return IRQ_NONE;
1059
1060 /*
1061 * If there are completed commands in the completion queue,
1062 * we had better do something about it.
1063 */
1064 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1065 if (istat & FIFO_NOT_EMPTY) {
1066 while((a = h->access.command_completed(h))) {
1067 a1 = a; a &= ~3;
1068 if ((c = h->cmpQ) == NULL)
1069 {
1070 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1071 continue;
1072 }
1073 while(c->busaddr != a) {
1074 c = c->next;
1075 if (c == h->cmpQ)
1076 break;
1077 }
1078 /*
1079 * If we've found the command, take it off the
1080 * completion Q and free it
1081 */
1082 if (c->busaddr == a) {
1083 removeQ(&h->cmpQ, c);
1084 /* Check for invalid command.
1085 * The controller returns a command error,
1086 * but rcode = 0.
1087 */
1088
1089 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1090 {
1091 c->req.hdr.rcode = RCODE_INVREQ;
1092 }
1093 if (c->type == CMD_RWREQ) {
1094 complete_command(c, 0);
1095 cmd_free(h, c, 1);
1096 } else if (c->type == CMD_IOCTL_PEND) {
1097 c->type = CMD_IOCTL_DONE;
1098 }
1099 continue;
1100 }
1101 }
1102 }
1103
1104 /*
1105 * See if we can queue up some more IO
1106 */
1107 do_ida_request(h->queue);
1108 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1109 return IRQ_HANDLED;
1110}
1111
1112/*
1113 * This timer was for timing out requests that haven't happened after
1114 * IDA_TIMEOUT. That wasn't such a good idea. The timer is now used to
1115 * reset a flags structure so we don't flood the user with
1116 * "Non-Fatal error" messages.
1117 */
1118static void ida_timer(unsigned long tdata)
1119{
1120 ctlr_info_t *h = (ctlr_info_t*)tdata;
1121
1122 h->timer.expires = jiffies + IDA_TIMER;
1123 add_timer(&h->timer);
1124 h->misc_tflags = 0;
1125}
1126
1127/*
1128 * ida_ioctl does some miscellaneous stuff like reporting drive geometry
1129 * and submitting commands from userspace to the controller.
1130 */
1131static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1132{
1133 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
1134 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
1135 int error;
1136 int diskinfo[4];
1137 struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
1138 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1139 ida_ioctl_t *my_io;
1140
1141 switch(cmd) {
1142 case HDIO_GETGEO:
1143 if (drv->cylinders) {
1144 diskinfo[0] = drv->heads;
1145 diskinfo[1] = drv->sectors;
1146 diskinfo[2] = drv->cylinders;
1147 } else {
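 /* No geometry from the controller: fake 255 heads and 63 sectors
  * per track and derive the cylinder count from the capacity. */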
1148 diskinfo[0] = 0xff;
1149 diskinfo[1] = 0x3f;
1150 diskinfo[2] = drv->nr_blks / (0xff*0x3f);
1151 }
1152 put_user(diskinfo[0], &geo->heads);
1153 put_user(diskinfo[1], &geo->sectors);
1154 put_user(diskinfo[2], &geo->cylinders);
1155 put_user(get_start_sect(inode->i_bdev), &geo->start);
1156 return 0;
1157 case IDAGETDRVINFO:
1158 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1159 return -EFAULT;
1160 return 0;
1161 case IDAPASSTHRU:
1162 if (!capable(CAP_SYS_RAWIO))
1163 return -EPERM;
1164 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1165 if (!my_io)
1166 return -ENOMEM;
1167 error = -EFAULT;
1168 if (copy_from_user(my_io, io, sizeof(*my_io)))
1169 goto out_passthru;
1170 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1171 if (error)
1172 goto out_passthru;
1173 error = -EFAULT;
1174 if (copy_to_user(io, my_io, sizeof(*my_io)))
1175 goto out_passthru;
1176 error = 0;
1177out_passthru:
1178 kfree(my_io);
1179 return error;
1180 case IDAGETCTLRSIG:
1181 if (!arg) return -EINVAL;
1182 put_user(host->ctlr_sig, (int __user *)arg);
1183 return 0;
1184 case IDAREVALIDATEVOLS:
1185 if (iminor(inode) != 0)
1186 return -ENXIO;
1187 return revalidate_allvol(host);
1188 case IDADRIVERVERSION:
1189 if (!arg) return -EINVAL;
1190 put_user(DRIVER_VERSION, (unsigned long __user *)arg);
1191 return 0;
1192 case IDAGETPCIINFO:
1193 {
1194
1195 ida_pci_info_struct pciinfo;
1196
1197 if (!arg) return -EINVAL;
1198 pciinfo.bus = host->pci_dev->bus->number;
1199 pciinfo.dev_fn = host->pci_dev->devfn;
1200 pciinfo.board_id = host->board_id;
1201 if(copy_to_user((void __user *) arg, &pciinfo,
1202 sizeof( ida_pci_info_struct)))
1203 return -EFAULT;
1204 return(0);
1205 }
1206
1207 default:
1208 return -EINVAL;
1209 }
1210
1211}
1212/*
1213 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1214 * The command block (io) has already been copied to kernel space for us,
1215 * however, any elements in the sglist need to be copied to kernel space
1216 * or copied back to userspace.
1217 *
1218 * Only root may perform a controller passthru command; however, I'm not doing
1219 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1220 * putting a 64M buffer in the sglist is probably a *bad* idea.
1221 */
1222static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1223{
1224 int ctlr = h->ctlr;
1225 cmdlist_t *c;
1226 void *p = NULL;
1227 unsigned long flags;
1228 int error;
1229
1230 if ((c = cmd_alloc(h, 0)) == NULL)
1231 return -ENOMEM;
1232 c->ctlr = ctlr;
1233 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1234 c->hdr.size = sizeof(rblk_t) >> 2;
1235 c->size += sizeof(rblk_t);
1236
1237 c->req.hdr.cmd = io->cmd;
1238 c->req.hdr.blk = io->blk;
1239 c->req.hdr.blk_cnt = io->blk_cnt;
1240 c->type = CMD_IOCTL_PEND;
1241
1242 /* Pre submit processing */
1243 switch(io->cmd) {
1244 case PASSTHRU_A:
1245 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1246 if (!p)
1247 {
1248 error = -ENOMEM;
1249 cmd_free(h, c, 0);
1250 return(error);
1251 }
1252 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1253 kfree(p);
1254 cmd_free(h, c, 0);
1255 return -EFAULT;
1256 }
1257 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1258 sizeof(ida_ioctl_t),
1259 PCI_DMA_BIDIRECTIONAL);
1260 c->req.sg[0].size = io->sg[0].size;
1261 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1262 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1263 c->req.hdr.sg_cnt = 1;
1264 break;
1265 case IDA_READ:
1266 case READ_FLASH_ROM:
1267 case SENSE_CONTROLLER_PERFORMANCE:
1268 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1269 if (!p)
1270 {
1271 error = -ENOMEM;
1272 cmd_free(h, c, 0);
1273 return(error);
1274 }
1275
1276 c->req.sg[0].size = io->sg[0].size;
1277 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1278 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1279 c->req.hdr.sg_cnt = 1;
1280 break;
1281 case IDA_WRITE:
1282 case IDA_WRITE_MEDIA:
1283 case DIAG_PASS_THRU:
1284 case COLLECT_BUFFER:
1285 case WRITE_FLASH_ROM:
1286 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1287 if (!p)
1288 {
1289 error = -ENOMEM;
1290 cmd_free(h, c, 0);
1291 return(error);
1292 }
1293 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1294 kfree(p);
1295 cmd_free(h, c, 0);
1296 return -EFAULT;
1297 }
1298 c->req.sg[0].size = io->sg[0].size;
1299 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1300 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1301 c->req.hdr.sg_cnt = 1;
1302 break;
1303 default:
1304 c->req.sg[0].size = sizeof(io->c);
1305 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1306 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1307 c->req.hdr.sg_cnt = 1;
1308 }
1309
1310 /* Put the request on the tail of the request queue */
1311 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1312 addQ(&h->reqQ, c);
1313 h->Qdepth++;
1314 start_io(h);
1315 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1316
1317 /* Wait for completion */
1318 while(c->type != CMD_IOCTL_DONE)
1319 schedule();
1320
1321 /* Unmap the DMA */
1322 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1323 PCI_DMA_BIDIRECTIONAL);
1324 /* Post submit processing */
1325 switch(io->cmd) {
1326 case PASSTHRU_A:
1327 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1328 sizeof(ida_ioctl_t),
1329 PCI_DMA_BIDIRECTIONAL);
1330 case IDA_READ:
1331 case DIAG_PASS_THRU:
1332 case SENSE_CONTROLLER_PERFORMANCE:
1333 case READ_FLASH_ROM:
1334 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1335 kfree(p);
1336 return -EFAULT;
1337 }
1338 /* fall through and free p */
1339 case IDA_WRITE:
1340 case IDA_WRITE_MEDIA:
1341 case COLLECT_BUFFER:
1342 case WRITE_FLASH_ROM:
1343 kfree(p);
1344 break;
1345 default:;
1346 /* Nothing to do */
1347 }
1348
1349 io->rcode = c->req.hdr.rcode;
1350 cmd_free(h, c, 0);
1351 return(0);
1352}
1353
1354/*
1355 * Commands are pre-allocated in a large block. Here we use a simple bitmap
1356 * scheme to suballocate them to the driver. Operations that are not time
1357 * critical (and can wait for kmalloc and possibly sleep) can pass 0 for
1358 * get_from_pool to have a fresh command allocated outside the pool instead.
1359 */
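/*
 * Pool entry i lives at h->cmd_pool + i, its bus address is
 * h->cmd_pool_dhandle + i*sizeof(cmdlist_t), and bit i of h->cmd_pool_bits
 * marks it in use; cmd_free() simply clears that bit again.
 */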
1360static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1361{
1362 cmdlist_t * c;
1363 int i;
1364 dma_addr_t cmd_dhandle;
1365
1366 if (!get_from_pool) {
1367 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1368 sizeof(cmdlist_t), &cmd_dhandle);
1369 if(c==NULL)
1370 return NULL;
1371 } else {
1372 do {
1373 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1374 if (i == NR_CMDS)
1375 return NULL;
1376 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1377 c = h->cmd_pool + i;
1378 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1379 h->nr_allocs++;
1380 }
1381
1382 memset(c, 0, sizeof(cmdlist_t));
1383 c->busaddr = cmd_dhandle;
1384 return c;
1385}
1386
1387static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1388{
1389 int i;
1390
1391 if (!got_from_pool) {
1392 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1393 c->busaddr);
1394 } else {
1395 i = c - h->cmd_pool;
1396 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1397 h->nr_frees++;
1398 }
1399}
1400
1401/***********************************************************************
1402 name: sendcmd
1403 Send a command to an IDA using the memory mapped FIFO interface
1404 and wait for it to complete.
1405 This routine should only be called at init time.
1406***********************************************************************/
1407static int sendcmd(
1408 __u8 cmd,
1409 int ctlr,
1410 void *buff,
1411 size_t size,
1412 unsigned int blk,
1413 unsigned int blkcnt,
1414 unsigned int log_unit )
1415{
1416 cmdlist_t *c;
1417 int complete;
1418 unsigned long temp;
1419 unsigned long i;
1420 ctlr_info_t *info_p = hba[ctlr];
1421
1422 c = cmd_alloc(info_p, 1);
1423 if(!c)
1424 return IO_ERROR;
1425 c->ctlr = ctlr;
1426 c->hdr.unit = log_unit;
1427 c->hdr.prio = 0;
1428 c->hdr.size = sizeof(rblk_t) >> 2;
1429 c->size += sizeof(rblk_t);
1430
1431 /* The request information. */
1432 c->req.hdr.next = 0;
1433 c->req.hdr.rcode = 0;
1434 c->req.bp = 0;
1435 c->req.hdr.sg_cnt = 1;
1436 c->req.hdr.reserved = 0;
1437
1438 if (size == 0)
1439 c->req.sg[0].size = 512;
1440 else
1441 c->req.sg[0].size = size;
1442
1443 c->req.hdr.blk = blk;
1444 c->req.hdr.blk_cnt = blkcnt;
1445 c->req.hdr.cmd = (unsigned char) cmd;
1446 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1447 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1448 /*
1449 * Disable interrupt
1450 */
1451 info_p->access.set_intr_mask(info_p, 0);
1452 /* Make sure there is room in the command FIFO */
1453 /* Actually it should be completely empty at this time. */
1454 for (i = 200000; i > 0; i--) {
1455 temp = info_p->access.fifo_full(info_p);
1456 if (temp != 0) {
1457 break;
1458 }
1459 udelay(10);
1460DBG(
1461 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1462 " waiting!\n", ctlr);
1463);
1464 }
1465 /*
1466 * Send the cmd
1467 */
1468 info_p->access.submit_command(info_p, c);
1469 complete = pollcomplete(ctlr);
1470
1471 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1472 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1473 if (complete != 1) {
1474 if (complete != c->busaddr) {
1475 printk( KERN_WARNING
1476 "cpqarray ida%d: idaSendPciCmd "
1477 "Invalid command list address returned! (%08lx)\n",
1478 ctlr, (unsigned long)complete);
1479 cmd_free(info_p, c, 1);
1480 return (IO_ERROR);
1481 }
1482 } else {
1483 printk( KERN_WARNING
1484 "cpqarray ida%d: idaSendPciCmd Timeout out, "
1485 "No command list address returned!\n",
1486 ctlr);
1487 cmd_free(info_p, c, 1);
1488 return (IO_ERROR);
1489 }
1490
1491 if (c->req.hdr.rcode & 0x00FE) {
1492 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1493 printk( KERN_WARNING
1494 "cpqarray ida%d: idaSendPciCmd, error: "
1495 "Controller failed at init time "
1496 "cmd: 0x%x, return code = 0x%x\n",
1497 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1498
1499 cmd_free(info_p, c, 1);
1500 return (IO_ERROR);
1501 }
1502 }
1503 cmd_free(info_p, c, 1);
1504 return (IO_OK);
1505}
1506
1507/*
1508 * revalidate_allvol is for online array config utilities. After a
1509 * utility reconfigures the drives in the array, it can use this function
1510 * (through an ioctl) to make the driver zap any previous disk structs for
1511 * that controller and get new ones.
1512 *
1513 * Right now I'm using the getgeometry() function to do this, but this
1514 * function should probably be finer grained and allow you to revalidate one
1515 * particular logical volume (instead of all of them on a particular
1516 * controller).
1517 */
1518static int revalidate_allvol(ctlr_info_t *host)
1519{
1520 int ctlr = host->ctlr;
1521 int i;
1522 unsigned long flags;
1523
1524 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1525 if (host->usage_count > 1) {
1526 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1527 printk(KERN_WARNING "cpqarray: Device busy for volume"
1528 " revalidation (usage=%d)\n", host->usage_count);
1529 return -EBUSY;
1530 }
1531 host->usage_count++;
1532 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1533
1534 /*
1535 * Set the partition and block size structures for all volumes
1536 * on this controller to zero. We will reread all of this data
1537 */
1538 set_capacity(ida_gendisk[ctlr][0], 0);
1539 for (i = 1; i < NWD; i++) {
1540 struct gendisk *disk = ida_gendisk[ctlr][i];
1541 if (disk->flags & GENHD_FL_UP)
1542 del_gendisk(disk);
1543 }
1544 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1545
1546 /*
1547 * Tell the array controller not to give us any interrupts while
1548 * we check the new geometry. Then turn interrupts back on when
1549 * we're done.
1550 */
1551 host->access.set_intr_mask(host, 0);
1552 getgeometry(ctlr);
1553 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1554
1555 for(i=0; i<NWD; i++) {
1556 struct gendisk *disk = ida_gendisk[ctlr][i];
1557 drv_info_t *drv = &host->drv[i];
1558 if (i && !drv->nr_blks)
1559 continue;
1560 blk_queue_hardsect_size(host->queue, drv->blk_size);
1561 set_capacity(disk, drv->nr_blks);
1562 disk->queue = host->queue;
1563 disk->private_data = drv;
1564 if (i)
1565 add_disk(disk);
1566 }
1567
1568 host->usage_count--;
1569 return 0;
1570}
1571
1572static int ida_revalidate(struct gendisk *disk)
1573{
1574 drv_info_t *drv = disk->private_data;
1575 set_capacity(disk, drv->nr_blks);
1576 return 0;
1577}
1578
1579/********************************************************************
1580 name: pollcomplete
1581 Wait polling for a command to complete.
1582 The memory mapped FIFO is polled for the completion.
1583 Used only at init time, interrupts disabled.
1584 ********************************************************************/
1585static int pollcomplete(int ctlr)
1586{
1587 int done;
1588 int i;
1589
1590 /* Wait (up to 2 seconds) for a command to complete */
1591
1592 for (i = 200000; i > 0; i--) {
1593 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1594 if (done == 0) {
1595 udelay(10); /* a short fixed delay */
1596 } else
1597 return (done);
1598 }
1599 /* Invalid address to tell caller we ran out of time */
1600 return 1;
1601}
1602/*****************************************************************
1603 start_fwbk
1604 Starts the controller firmware's background processing.
1605 Currently only the Integrated RAID controller needs this done.
1606 If the PCI mem address registers are written to after this,
1607 data corruption may occur.
1608*****************************************************************/
1609static void start_fwbk(int ctlr)
1610{
1611 id_ctlr_t *id_ctlr_buf;
1612 int ret_code;
1613
1614 if( (hba[ctlr]->board_id != 0x40400E11)
1615 && (hba[ctlr]->board_id != 0x40480E11) )
1616
1617 /* Not an Integrated RAID controller, so there is nothing for us to do */
1618 return;
1619 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1620 " processing\n");
1621 /* The command does not return anything, but sendcmd still needs a
1622 buffer */
1623 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1624 if(id_ctlr_buf==NULL)
1625 {
1626 printk(KERN_WARNING "cpqarray: Out of memory. "
1627 "Unable to start background processing.\n");
1628 return;
1629 }
1630 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1631 id_ctlr_buf, 0, 0, 0, 0);
1632 if(ret_code != IO_OK)
1633 printk(KERN_WARNING "cpqarray: Unable to start"
1634 " background processing\n");
1635
1636 kfree(id_ctlr_buf);
1637}
1638/*****************************************************************
1639 getgeometry
1640 Get ida logical volume geometry from the controller
1641 This is a large bit of code which once existed in two flavors.
1642 It is used only at init time.
1643*****************************************************************/
1644static void getgeometry(int ctlr)
1645{
1646 id_log_drv_t *id_ldrive;
1647 id_ctlr_t *id_ctlr_buf;
1648 sense_log_drv_stat_t *id_lstatus_buf;
1649 config_t *sense_config_buf;
1650 unsigned int log_unit, log_index;
1651 int ret_code, size;
1652 drv_info_t *drv;
1653 ctlr_info_t *info_p = hba[ctlr];
1654 int i;
1655
1656 info_p->log_drv_map = 0;
1657
1658 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1659 if(id_ldrive == NULL)
1660 {
1661 printk( KERN_ERR "cpqarray: out of memory.\n");
1662 return;
1663 }
1664
1665 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1666 if(id_ctlr_buf == NULL)
1667 {
1668 kfree(id_ldrive);
1669 printk( KERN_ERR "cpqarray: out of memory.\n");
1670 return;
1671 }
1672
1673 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1674 if(id_lstatus_buf == NULL)
1675 {
1676 kfree(id_ctlr_buf);
1677 kfree(id_ldrive);
1678 printk( KERN_ERR "cpqarray: out of memory.\n");
1679 return;
1680 }
1681
1682 sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL);
1683 if(sense_config_buf == NULL)
1684 {
1685 kfree(id_lstatus_buf);
1686 kfree(id_ctlr_buf);
1687 kfree(id_ldrive);
1688 printk( KERN_ERR "cpqarray: out of memory.\n");
1689 return;
1690 }
1691
1692 memset(id_ldrive, 0, sizeof(id_log_drv_t));
1693 memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
1694 memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
1695 memset(sense_config_buf, 0, sizeof(config_t));
1696
1697 info_p->phys_drives = 0;
1698 info_p->log_drv_map = 0;
1699 info_p->drv_assign_map = 0;
1700 info_p->drv_spare_map = 0;
1701 info_p->mp_failed_drv_map = 0; /* only initialized here */
1702 /* Get controllers info for this logical drive */
1703 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1704 if (ret_code == IO_ERROR) {
1705 /*
1706 * If we can't get controller info, set the logical drive map to 0,
1707 * so the idastubopen will fail on all logical drives
1708 * on the controller.
1709 */
1710 /* Free all the buffers and return */
1711 printk(KERN_ERR "cpqarray: error sending ID controller\n");
1712 kfree(sense_config_buf);
1713 kfree(id_lstatus_buf);
1714 kfree(id_ctlr_buf);
1715 kfree(id_ldrive);
1716 return;
1717 }
1718
1719 info_p->log_drives = id_ctlr_buf->nr_drvs;
1720 for(i=0;i<4;i++)
1721 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1722 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1723
1724 printk(" (%s)\n", info_p->product_name);
1725 /*
1726 * Initialize logical drive map to zero
1727 */
1728 log_index = 0;
1729 /*
1730 * Get drive geometry for all logical drives
1731 */
1732 if (id_ctlr_buf->nr_drvs > 16)
1733 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
1734 "16 logical drives per controller.\n. "
1735 " Additional drives will not be "
1736 "detected\n", ctlr);
1737
1738 for (log_unit = 0;
1739 (log_index < id_ctlr_buf->nr_drvs)
1740 && (log_unit < NWD);
1741 log_unit++) {
1742 struct gendisk *disk = ida_gendisk[ctlr][log_unit];
1743
1744 size = sizeof(sense_log_drv_stat_t);
1745
1746 /*
1747 Send "Identify logical drive status" cmd
1748 */
1749 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1750 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1751 if (ret_code == IO_ERROR) {
1752 /*
1753 If we can't get logical drive status, set
1754 the logical drive map to 0, so the
1755 idastubopen will fail for all logical drives
1756 on the controller.
1757 */
1758 info_p->log_drv_map = 0;
1759 printk( KERN_WARNING
1760 "cpqarray ida%d: idaGetGeometry - Controller"
1761 " failed to report status of logical drive %d\n"
1762 "Access to this controller has been disabled\n",
1763 ctlr, log_unit);
1764 /* Free all the buffers and return */
1765 kfree(sense_config_buf);
1766 kfree(id_lstatus_buf);
1767 kfree(id_ctlr_buf);
1768 kfree(id_ldrive);
1769 return;
1770 }
1771 /*
1772 Make sure the logical drive is configured
1773 */
1774 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1775 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1776 sizeof(id_log_drv_t), 0, 0, log_unit);
1777 /*
1778 If error, the bit for this
1779 logical drive won't be set and
1780 idastubopen will return error.
1781 */
1782 if (ret_code != IO_ERROR) {
1783 drv = &info_p->drv[log_unit];
1784 drv->blk_size = id_ldrive->blk_size;
1785 drv->nr_blks = id_ldrive->nr_blks;
1786 drv->cylinders = id_ldrive->drv.cyl;
1787 drv->heads = id_ldrive->drv.heads;
1788 drv->sectors = id_ldrive->drv.sect_per_track;
1789 info_p->log_drv_map |= (1 << log_unit);
1790
1791 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1792 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1793 ret_code = sendcmd(SENSE_CONFIG,
1794 ctlr, sense_config_buf,
1795 sizeof(config_t), 0, 0, log_unit);
1796 if (ret_code == IO_ERROR) {
1797 info_p->log_drv_map = 0;
1798 /* Free all the buffers and return */
1799 printk(KERN_ERR "cpqarray: error sending sense config\n");
1800 kfree(sense_config_buf);
1801 kfree(id_lstatus_buf);
1802 kfree(id_ctlr_buf);
1803 kfree(id_ldrive);
1804 return;
1805
1806 }
1807
1808 sprintf(disk->devfs_name, "ida/c%dd%d", ctlr, log_unit);
1809
1810 info_p->phys_drives =
1811 sense_config_buf->ctlr_phys_drv;
1812 info_p->drv_assign_map
1813 |= sense_config_buf->drv_asgn_map;
1814 info_p->drv_assign_map
1815 |= sense_config_buf->spare_asgn_map;
1816 info_p->drv_spare_map
1817 |= sense_config_buf->spare_asgn_map;
1818 } /* end of if no error on id_ldrive */
1819 log_index = log_index + 1;
1820 } /* end of if logical drive configured */
1821 } /* end of for log_unit */
1822 kfree(sense_config_buf);
1823 kfree(id_ldrive);
1824 kfree(id_lstatus_buf);
1825 kfree(id_ctlr_buf);
1826 return;
1827
1828}
1829
1830static void __exit cpqarray_exit(void)
1831{
1832 int i;
1833
1834 pci_unregister_driver(&cpqarray_pci_driver);
1835
1836 /* Double check that all controller entries have been removed */
1837 for(i=0; i<MAX_CTLR; i++) {
1838 if (hba[i] != NULL) {
1839 printk(KERN_WARNING "cpqarray: Removing EISA "
1840 "controller %d\n", i);
1841 cpqarray_remove_one_eisa(i);
1842 }
1843 }
1844
1845 devfs_remove("ida");
1846 remove_proc_entry("cpqarray", proc_root_driver);
1847}
1848
1849module_init(cpqarray_init)
1850module_exit(cpqarray_exit)