blob: 69985b08a270944512ca313f05c9b23fdabd6bad [file] [log] [blame]
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 *
 */
31
32#include <linux/kernel.h>
33#include <linux/init.h>
34#include <linux/types.h>
35#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/spinlock.h>
38#include <linux/slab.h>
39#include <linux/completion.h>
40#include <linux/blkdev.h>
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -070041#include <scsi/scsi_host.h>
Mark Haverkamp131256c2005-09-26 13:04:56 -070042#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <asm/semaphore.h>
44
45#include "aacraid.h"
46
47/**
48 * fib_map_alloc - allocate the fib objects
49 * @dev: Adapter to allocate for
50 *
51 * Allocate and map the shared PCI space for the FIB blocks used to
52 * talk to the Adaptec firmware.
53 */
54
55static int fib_map_alloc(struct aac_dev *dev)
56{
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -070057 dprintk((KERN_INFO
58 "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
59 dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
60 AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
61 if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
62 * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
63 &dev->hw_fib_pa))==NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -070064 return -ENOMEM;
65 return 0;
66}
67
68/**
69 * fib_map_free - free the fib objects
70 * @dev: Adapter to free
71 *
72 * Free the PCI mappings and the memory allocated for FIB blocks
73 * on this adapter.
74 */
75
76void fib_map_free(struct aac_dev *dev)
77{
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -070078 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079}
80
81/**
82 * fib_setup - setup the fibs
83 * @dev: Adapter to set up
84 *
85 * Allocate the PCI space for the fibs, map it and then intialise the
86 * fib area, the unmapped fib data and also the free list
87 */
88
89int fib_setup(struct aac_dev * dev)
90{
91 struct fib *fibptr;
92 struct hw_fib *hw_fib_va;
93 dma_addr_t hw_fib_pa;
94 int i;
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -070095
96 while (((i = fib_map_alloc(dev)) == -ENOMEM)
97 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
98 dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
99 dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
100 }
101 if (i<0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102 return -ENOMEM;
103
104 hw_fib_va = dev->hw_fib_va;
105 hw_fib_pa = dev->hw_fib_pa;
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700106 memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 /*
108 * Initialise the fibs
109 */
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700110 for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111 {
112 fibptr->dev = dev;
113 fibptr->hw_fib = hw_fib_va;
114 fibptr->data = (void *) fibptr->hw_fib->data;
115 fibptr->next = fibptr+1; /* Forward chain the fibs */
116 init_MUTEX_LOCKED(&fibptr->event_wait);
117 spin_lock_init(&fibptr->event_lock);
Mark Haverkamp 56b58712005-04-27 06:05:51 -0700118 hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700119 hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120 fibptr->hw_fib_pa = hw_fib_pa;
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700121 hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
122 hw_fib_pa = hw_fib_pa + dev->max_fib_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 }
124 /*
125 * Add the fib chain to the free list
126 */
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700127 dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128 /*
129 * Enable this to debug out of queue space
130 */
131 dev->free_fib = &dev->fibs[0];
132 return 0;
133}
134
135/**
136 * fib_alloc - allocate a fib
137 * @dev: Adapter to allocate the fib for
138 *
139 * Allocate a fib from the adapter fib pool. If the pool is empty we
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700140 * return NULL.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141 */
142
143struct fib * fib_alloc(struct aac_dev *dev)
144{
145 struct fib * fibptr;
146 unsigned long flags;
147 spin_lock_irqsave(&dev->fib_lock, flags);
148 fibptr = dev->free_fib;
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700149 if(!fibptr){
150 spin_unlock_irqrestore(&dev->fib_lock, flags);
151 return fibptr;
152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153 dev->free_fib = fibptr->next;
154 spin_unlock_irqrestore(&dev->fib_lock, flags);
155 /*
156 * Set the proper node type code and node byte size
157 */
158 fibptr->type = FSAFS_NTC_FIB_CONTEXT;
159 fibptr->size = sizeof(struct fib);
160 /*
161 * Null out fields that depend on being zero at the start of
162 * each I/O
163 */
164 fibptr->hw_fib->header.XferState = 0;
165 fibptr->callback = NULL;
166 fibptr->callback_data = NULL;
167
168 return fibptr;
169}
170
171/**
172 * fib_free - free a fib
173 * @fibptr: fib to free up
174 *
175 * Frees up a fib and places it on the appropriate queue
176 * (either free or timed out)
177 */
178
179void fib_free(struct fib * fibptr)
180{
181 unsigned long flags;
182
183 spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
184 if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
185 aac_config.fib_timeouts++;
186 fibptr->next = fibptr->dev->timeout_fib;
187 fibptr->dev->timeout_fib = fibptr;
188 } else {
189 if (fibptr->hw_fib->header.XferState != 0) {
190 printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
191 (void*)fibptr,
192 le32_to_cpu(fibptr->hw_fib->header.XferState));
193 }
194 fibptr->next = fibptr->dev->free_fib;
195 fibptr->dev->free_fib = fibptr;
196 }
197 spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
198}
199
200/**
201 * fib_init - initialise a fib
202 * @fibptr: The fib to initialize
203 *
204 * Set up the generic fib fields ready for use
205 */
206
207void fib_init(struct fib *fibptr)
208{
209 struct hw_fib *hw_fib = fibptr->hw_fib;
210
211 hw_fib->header.StructType = FIB_MAGIC;
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700212 hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
213 hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
215 hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700216 hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217}
218
219/**
220 * fib_deallocate - deallocate a fib
221 * @fibptr: fib to deallocate
222 *
223 * Will deallocate and return to the free pool the FIB pointed to by the
224 * caller.
225 */
226
Adrian Bunk 48338692005-04-25 19:45:58 -0700227static void fib_dealloc(struct fib * fibptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228{
229 struct hw_fib *hw_fib = fibptr->hw_fib;
230 if(hw_fib->header.StructType != FIB_MAGIC)
231 BUG();
232 hw_fib->header.XferState = 0;
233}
234
/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and these are the only routines which have
 *	knowledge of how these queues are implemented.
 */
241
/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has free entries. If the queue
 *	is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
 *	returned.
 *
 *	Called with the queue lock held (taken in aac_queue_get()).
 *	*@nonotify is set when more than one entry is already outstanding,
 *	letting the caller suppress the adapter doorbell (interrupt
 *	moderation).
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 * All of the queues wrap when they reach the end, so we check
	 * to see if they have reached the end and if they have we just
	 * set the index back to zero. This is a wrap. You could or off
	 * the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			/* Producer was at slot 0: the previous entry is the
			 * last slot of this particular queue, so wrap idx
			 * backwards to the queue size. */
			if (qid == AdapHighCmdQueue)
				idx = ADAP_HIGH_CMD_ENTRIES;
			else if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else if (qid == AdapHighRespQueue)
				idx = ADAP_HIGH_RESP_ENTRIES;
			else if (qid == AdapNormRespQueue)
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		/* If the previous entry has not yet been consumed either,
		 * at least two are outstanding: suppress the notification. */
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	/* Wrap the producer index back to the front of its queue. */
	if (qid == AdapHighCmdQueue) {
		if (*index >= ADAP_HIGH_CMD_ENTRIES)
			*index = 0;
	} else if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}
	else if (qid == AdapHighRespQueue)
	{
		if (*index >= ADAP_HIGH_RESP_ENTRIES)
			*index = 0;
	}
	else if (qid == AdapNormRespQueue)
	{
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}
	else {
		printk("aacraid: invalid qid\n");
		BUG();
	}

	/* NOTE(review): (*index + 1) is not itself wrapped here; the "full"
	 * test at the wrap boundary relies on the wrapping done above —
	 * confirm against the firmware queue protocol before changing. */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
317
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priorty adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 *
 *	Takes the queue spinlock and returns with it HELD; it is released
 *	later by aac_insert_entry() (the irq flags are handed off via
 *	q->SavedIrql). Always returns 0.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;
	struct aac_queue * q = &dev->queues->queue[qid];

	spin_lock_irqsave(q->lock, q->SavedIrql);

	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
	{
		/* if no entries wait for some if caller wants to */
		/* NOTE(review): despite the comment, @wait is never consulted;
		 * this spins (printing each failure) until an entry frees up,
		 * with the queue lock held. */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 * Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	}
	else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
	{
		/* NOTE(review): silent busy-wait until an entry is free. */
		while(!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			/* if no entries wait for some if caller wants to */
		}
		/*
		 * Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter now where to find its data */
		map = 0;
	}
	/*
	 * If MapFib is true than we need to map the Fib and put pointers
	 * in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
378
379
380/**
381 * aac_insert_entry - insert a queue entry
382 * @dev: Adapter
383 * @index: Index of entry to insert
384 * @qid: Queue number
385 * @nonotify: Suppress adapter notification
386 *
387 * Gets the next free QE off the requested priorty adapter command
388 * queue and associates the Fib with the QE. The QE represented by
389 * index is ready to insert on the queue when this routine returns
390 * success.
391 */
392
393static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
394{
395 struct aac_queue * q = &dev->queues->queue[qid];
396
397 if(q == NULL)
398 BUG();
399 *(q->headers.producer) = cpu_to_le32(index + 1);
400 spin_unlock_irqrestore(q->lock, q->SavedIrql);
401
402 if (qid == AdapHighCmdQueue ||
403 qid == AdapNormCmdQueue ||
404 qid == AdapHighRespQueue ||
405 qid == AdapNormRespQueue)
406 {
407 if (!nonotify)
408 aac_adapter_notify(dev, qid);
409 }
410 else
411 printk("Suprise insert!\n");
412 return 0;
413}
414
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs. This level has no knowledge of how
 *	these FIBs get passed back and forth.
 */
422
423/**
424 * fib_send - send a fib to the adapter
425 * @command: Command to send
426 * @fibptr: The fib
427 * @size: Size of fib data area
428 * @priority: Priority of Fib
429 * @wait: Async/sync select
430 * @reply: True if a reply is wanted
431 * @callback: Called with reply
432 * @callback_data: Passed to callback
433 *
434 * Sends the requested FIB to the adapter and optionally will wait for a
435 * response FIB. If the caller does not wish to wait for a response than
436 * an event to wait on must be supplied. This event will be set when a
437 * response FIB is received from the adapter.
438 */
439
440int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
441{
442 u32 index;
443 u32 qid;
444 struct aac_dev * dev = fibptr->dev;
445 unsigned long nointr = 0;
446 struct hw_fib * hw_fib = fibptr->hw_fib;
447 struct aac_queue * q;
448 unsigned long flags = 0;
449 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
450 return -EBUSY;
451 /*
452 * There are 5 cases with the wait and reponse requested flags.
453 * The only invalid cases are if the caller requests to wait and
454 * does not request a response and if the caller does not want a
455 * response and the Fib is not allocated from pool. If a response
456 * is not requesed the Fib will just be deallocaed by the DPC
457 * routine when the response comes back from the adapter. No
458 * further processing will be done besides deleting the Fib. We
459 * will have a debug mode where the adapter can notify the host
460 * it had a problem and the host can log that fact.
461 */
462 if (wait && !reply) {
463 return -EINVAL;
464 } else if (!wait && reply) {
465 hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
466 FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
467 } else if (!wait && !reply) {
468 hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
469 FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
470 } else if (wait && reply) {
471 hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
472 FIB_COUNTER_INCREMENT(aac_config.NormalSent);
473 }
474 /*
475 * Map the fib into 32bits by using the fib number
476 */
477
478 hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
479 hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
480 /*
481 * Set FIB state to indicate where it came from and if we want a
482 * response from the adapter. Also load the command from the
483 * caller.
484 *
485 * Map the hw fib pointer as a 32bit value
486 */
487 hw_fib->header.Command = cpu_to_le16(command);
488 hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
489 fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/
490 /*
491 * Set the size of the Fib we want to send to the adapter
492 */
493 hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
494 if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
495 return -EMSGSIZE;
496 }
497 /*
498 * Get a queue entry connect the FIB to it and send an notify
499 * the adapter a command is ready.
500 */
501 if (priority == FsaHigh) {
502 hw_fib->header.XferState |= cpu_to_le32(HighPriority);
503 qid = AdapHighCmdQueue;
504 } else {
505 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
506 qid = AdapNormCmdQueue;
507 }
508 q = &dev->queues->queue[qid];
509
510 if(wait)
511 spin_lock_irqsave(&fibptr->event_lock, flags);
512 if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
513 return -EWOULDBLOCK;
514 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
515 dprintk((KERN_DEBUG "Fib contents:.\n"));
516 dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
517 dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
518 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
519 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
520 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
521 /*
522 * Fill in the Callback and CallbackContext if we are not
523 * going to wait.
524 */
525 if (!wait) {
526 fibptr->callback = callback;
527 fibptr->callback_data = callback_data;
528 }
529 FIB_COUNTER_INCREMENT(aac_config.FibsSent);
530 list_add_tail(&fibptr->queue, &q->pendingq);
531 q->numpending++;
532
533 fibptr->done = 0;
534 fibptr->flags = 0;
535
536 if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
537 return -EWOULDBLOCK;
538 /*
539 * If the caller wanted us to wait for response wait now.
540 */
541
542 if (wait) {
543 spin_unlock_irqrestore(&fibptr->event_lock, flags);
544 down(&fibptr->event_wait);
545 if(fibptr->done == 0)
546 BUG();
547
548 if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
549 return -ETIMEDOUT;
550 } else {
551 return 0;
552 }
553 }
554 /*
555 * If the user does not want a response than return success otherwise
556 * return pending
557 */
558 if (reply)
559 return -EINPROGRESS;
560 else
561 return 0;
562}
563
564/**
565 * aac_consumer_get - get the top of the queue
566 * @dev: Adapter
567 * @q: Queue
568 * @entry: Return entry
569 *
570 * Will return a pointer to the entry on the top of the queue requested that
571 * we are a consumer of, and return the address of the queue entry. It does
572 * not change the state of the queue.
573 */
574
575int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
576{
577 u32 index;
578 int status;
579 if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
580 status = 0;
581 } else {
582 /*
583 * The consumer index must be wrapped if we have reached
584 * the end of the queue, else we just use the entry
585 * pointed to by the header index
586 */
587 if (le32_to_cpu(*q->headers.consumer) >= q->entries)
588 index = 0;
589 else
590 index = le32_to_cpu(*q->headers.consumer);
591 *entry = q->base + index;
592 status = 1;
593 }
594 return(status);
595}
596
597/**
598 * aac_consumer_free - free consumer entry
599 * @dev: Adapter
600 * @q: Queue
601 * @qid: Queue ident
602 *
603 * Frees up the current top of the queue we are a consumer of. If the
604 * queue was full notify the producer that the queue is no longer full.
605 */
606
607void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
608{
609 int wasfull = 0;
610 u32 notify;
611
612 if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
613 wasfull = 1;
614
615 if (le32_to_cpu(*q->headers.consumer) >= q->entries)
616 *q->headers.consumer = cpu_to_le32(1);
617 else
618 *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
619
620 if (wasfull) {
621 switch (qid) {
622
623 case HostNormCmdQueue:
624 notify = HostNormCmdNotFull;
625 break;
626 case HostHighCmdQueue:
627 notify = HostHighCmdNotFull;
628 break;
629 case HostNormRespQueue:
630 notify = HostNormRespNotFull;
631 break;
632 case HostHighRespQueue:
633 notify = HostHighRespNotFull;
634 break;
635 default:
636 BUG();
637 return;
638 }
639 aac_adapter_notify(dev, notify);
640 }
641}
642
643/**
644 * fib_adapter_complete - complete adapter issued fib
645 * @fibptr: fib to complete
646 * @size: size of fib
647 *
648 * Will do all necessary work to complete a FIB that was sent from
649 * the adapter.
650 */
651
652int fib_adapter_complete(struct fib * fibptr, unsigned short size)
653{
654 struct hw_fib * hw_fib = fibptr->hw_fib;
655 struct aac_dev * dev = fibptr->dev;
656 unsigned long nointr = 0;
657 if (hw_fib->header.XferState == 0)
658 return 0;
659 /*
660 * If we plan to do anything check the structure type first.
661 */
662 if ( hw_fib->header.StructType != FIB_MAGIC ) {
663 return -EINVAL;
664 }
665 /*
666 * This block handles the case where the adapter had sent us a
667 * command and we have finished processing the command. We
668 * call completeFib when we are done processing the command
669 * and want to send a response back to the adapter. This will
670 * send the completed cdb to the adapter.
671 */
672 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
673 hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
674 if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
675 u32 index;
676 if (size)
677 {
678 size += sizeof(struct aac_fibhdr);
679 if (size > le16_to_cpu(hw_fib->header.SenderSize))
680 return -EMSGSIZE;
681 hw_fib->header.Size = cpu_to_le16(size);
682 }
683 if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
684 return -EWOULDBLOCK;
685 }
686 if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
687 }
Mark Haverkamp 56b58712005-04-27 06:05:51 -0700688 } else if (hw_fib->header.XferState &
689 cpu_to_le32(NormalPriority)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690 u32 index;
691
692 if (size) {
693 size += sizeof(struct aac_fibhdr);
694 if (size > le16_to_cpu(hw_fib->header.SenderSize))
695 return -EMSGSIZE;
696 hw_fib->header.Size = cpu_to_le16(size);
697 }
698 if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
699 return -EWOULDBLOCK;
700 if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
701 {
702 }
703 }
704 }
705 else
706 {
707 printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
708 BUG();
709 }
710 return 0;
711}
712
713/**
714 * fib_complete - fib completion handler
715 * @fib: FIB to complete
716 *
717 * Will do all necessary work to complete a FIB.
718 */
719
720int fib_complete(struct fib * fibptr)
721{
722 struct hw_fib * hw_fib = fibptr->hw_fib;
723
724 /*
725 * Check for a fib which has already been completed
726 */
727
728 if (hw_fib->header.XferState == 0)
729 return 0;
730 /*
731 * If we plan to do anything check the structure type first.
732 */
733
734 if (hw_fib->header.StructType != FIB_MAGIC)
735 return -EINVAL;
736 /*
737 * This block completes a cdb which orginated on the host and we
738 * just need to deallocate the cdb or reinit it. At this point the
739 * command is complete that we had sent to the adapter and this
740 * cdb could be reused.
741 */
742 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
743 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
744 {
745 fib_dealloc(fibptr);
746 }
747 else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
748 {
749 /*
750 * This handles the case when the host has aborted the I/O
751 * to the adapter because the adapter is not responding
752 */
753 fib_dealloc(fibptr);
754 } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
755 fib_dealloc(fibptr);
756 } else {
757 BUG();
758 }
759 return 0;
760}
761
762/**
763 * aac_printf - handle printf from firmware
764 * @dev: Adapter
765 * @val: Message info
766 *
767 * Print a message passed to us by the controller firmware on the
768 * Adaptec board
769 */
770
771void aac_printf(struct aac_dev *dev, u32 val)
772{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773 char *cp = dev->printfbuf;
Mark Haverkamp 7c00ffa2005-05-16 18:28:42 -0700774 if (dev->printf_enabled)
775 {
776 int length = val & 0xffff;
777 int level = (val >> 16) & 0xffff;
778
779 /*
780 * The size of the printfbuf is set in port.c
781 * There is no variable or define for it
782 */
783 if (length > 255)
784 length = 255;
785 if (cp[length] != 0)
786 cp[length] = 0;
787 if (level == LOG_AAC_HIGH_ERROR)
788 printk(KERN_WARNING "aacraid:%s", cp);
789 else
790 printk(KERN_INFO "aacraid:%s", cp);
791 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792 memset(cp, 0, 256);
793}
794
Mark Haverkamp131256c2005-09-26 13:04:56 -0700795
796/**
797 * aac_handle_aif - Handle a message from the firmware
798 * @dev: Which adapter this fib is from
799 * @fibptr: Pointer to fibptr from adapter
800 *
801 * This routine handles a driver notify fib from the adapter and
802 * dispatches it to the appropriate routine for handling.
803 */
804
805static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
806{
807 struct hw_fib * hw_fib = fibptr->hw_fib;
808 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
809 int busy;
810 u32 container;
811 struct scsi_device *device;
812 enum {
813 NOTHING,
814 DELETE,
815 ADD,
816 CHANGE
817 } device_config_needed;
818
819 /* Sniff for container changes */
820
821 if (!dev)
822 return;
823 container = (u32)-1;
824
825 /*
826 * We have set this up to try and minimize the number of
827 * re-configures that take place. As a result of this when
828 * certain AIF's come in we will set a flag waiting for another
829 * type of AIF before setting the re-config flag.
830 */
831 switch (le32_to_cpu(aifcmd->command)) {
832 case AifCmdDriverNotify:
833 switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
834 /*
835 * Morph or Expand complete
836 */
837 case AifDenMorphComplete:
838 case AifDenVolumeExtendComplete:
839 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
840 if (container >= dev->maximum_num_containers)
841 break;
842
843 /*
844 * Find the Scsi_Device associated with the SCSI
845 * address. Make sure we have the right array, and if
846 * so set the flag to initiate a new re-config once we
847 * see an AifEnConfigChange AIF come through.
848 */
849
850 if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
851 device = scsi_device_lookup(dev->scsi_host_ptr,
852 CONTAINER_TO_CHANNEL(container),
853 CONTAINER_TO_ID(container),
854 CONTAINER_TO_LUN(container));
855 if (device) {
856 dev->fsa_dev[container].config_needed = CHANGE;
857 dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
858 scsi_device_put(device);
859 }
860 }
861 }
862
863 /*
864 * If we are waiting on something and this happens to be
865 * that thing then set the re-configure flag.
866 */
867 if (container != (u32)-1) {
868 if (container >= dev->maximum_num_containers)
869 break;
870 if (dev->fsa_dev[container].config_waiting_on ==
871 le32_to_cpu(*(u32 *)aifcmd->data))
872 dev->fsa_dev[container].config_waiting_on = 0;
873 } else for (container = 0;
874 container < dev->maximum_num_containers; ++container) {
875 if (dev->fsa_dev[container].config_waiting_on ==
876 le32_to_cpu(*(u32 *)aifcmd->data))
877 dev->fsa_dev[container].config_waiting_on = 0;
878 }
879 break;
880
881 case AifCmdEventNotify:
882 switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
883 /*
884 * Add an Array.
885 */
886 case AifEnAddContainer:
887 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
888 if (container >= dev->maximum_num_containers)
889 break;
890 dev->fsa_dev[container].config_needed = ADD;
891 dev->fsa_dev[container].config_waiting_on =
892 AifEnConfigChange;
893 break;
894
895 /*
896 * Delete an Array.
897 */
898 case AifEnDeleteContainer:
899 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
900 if (container >= dev->maximum_num_containers)
901 break;
902 dev->fsa_dev[container].config_needed = DELETE;
903 dev->fsa_dev[container].config_waiting_on =
904 AifEnConfigChange;
905 break;
906
907 /*
908 * Container change detected. If we currently are not
909 * waiting on something else, setup to wait on a Config Change.
910 */
911 case AifEnContainerChange:
912 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
913 if (container >= dev->maximum_num_containers)
914 break;
915 if (dev->fsa_dev[container].config_waiting_on)
916 break;
917 dev->fsa_dev[container].config_needed = CHANGE;
918 dev->fsa_dev[container].config_waiting_on =
919 AifEnConfigChange;
920 break;
921
922 case AifEnConfigChange:
923 break;
924
925 }
926
927 /*
928 * If we are waiting on something and this happens to be
929 * that thing then set the re-configure flag.
930 */
931 if (container != (u32)-1) {
932 if (container >= dev->maximum_num_containers)
933 break;
934 if (dev->fsa_dev[container].config_waiting_on ==
935 le32_to_cpu(*(u32 *)aifcmd->data))
936 dev->fsa_dev[container].config_waiting_on = 0;
937 } else for (container = 0;
938 container < dev->maximum_num_containers; ++container) {
939 if (dev->fsa_dev[container].config_waiting_on ==
940 le32_to_cpu(*(u32 *)aifcmd->data))
941 dev->fsa_dev[container].config_waiting_on = 0;
942 }
943 break;
944
945 case AifCmdJobProgress:
946 /*
947 * These are job progress AIF's. When a Clear is being
948 * done on a container it is initially created then hidden from
949 * the OS. When the clear completes we don't get a config
950 * change so we monitor the job status complete on a clear then
951 * wait for a container change.
952 */
953
954 if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
955 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
956 || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
957 for (container = 0;
958 container < dev->maximum_num_containers;
959 ++container) {
960 /*
961 * Stomp on all config sequencing for all
962 * containers?
963 */
964 dev->fsa_dev[container].config_waiting_on =
965 AifEnContainerChange;
966 dev->fsa_dev[container].config_needed = ADD;
967 }
968 }
969 if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
970 && (((u32 *)aifcmd->data)[6] == 0)
971 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
972 for (container = 0;
973 container < dev->maximum_num_containers;
974 ++container) {
975 /*
976 * Stomp on all config sequencing for all
977 * containers?
978 */
979 dev->fsa_dev[container].config_waiting_on =
980 AifEnContainerChange;
981 dev->fsa_dev[container].config_needed = DELETE;
982 }
983 }
984 break;
985 }
986
987 device_config_needed = NOTHING;
988 for (container = 0; container < dev->maximum_num_containers;
989 ++container) {
990 if ((dev->fsa_dev[container].config_waiting_on == 0)
991 && (dev->fsa_dev[container].config_needed != NOTHING)) {
992 device_config_needed =
993 dev->fsa_dev[container].config_needed;
994 dev->fsa_dev[container].config_needed = NOTHING;
995 break;
996 }
997 }
998 if (device_config_needed == NOTHING)
999 return;
1000
1001 /*
1002 * If we decided that a re-configuration needs to be done,
1003 * schedule it here on the way out the door, please close the door
1004 * behind you.
1005 */
1006
1007 busy = 0;
1008
1009
1010 /*
1011 * Find the Scsi_Device associated with the SCSI address,
1012 * and mark it as changed, invalidating the cache. This deals
1013 * with changes to existing device IDs.
1014 */
1015
1016 if (!dev || !dev->scsi_host_ptr)
1017 return;
1018 /*
1019 * force reload of disk info via probe_container
1020 */
1021 if ((device_config_needed == CHANGE)
1022 && (dev->fsa_dev[container].valid == 1))
1023 dev->fsa_dev[container].valid = 2;
1024 if ((device_config_needed == CHANGE) ||
1025 (device_config_needed == ADD))
1026 probe_container(dev, container);
1027 device = scsi_device_lookup(dev->scsi_host_ptr,
1028 CONTAINER_TO_CHANNEL(container),
1029 CONTAINER_TO_ID(container),
1030 CONTAINER_TO_LUN(container));
1031 if (device) {
1032 switch (device_config_needed) {
1033 case DELETE:
1034 scsi_remove_device(device);
1035 break;
1036 case CHANGE:
1037 if (!dev->fsa_dev[container].valid) {
1038 scsi_remove_device(device);
1039 break;
1040 }
1041 scsi_rescan_device(&device->sdev_gendev);
1042
1043 default:
1044 break;
1045 }
1046 scsi_device_put(device);
1047 }
1048 if (device_config_needed == ADD) {
1049 scsi_add_device(dev->scsi_host_ptr,
1050 CONTAINER_TO_CHANNEL(container),
1051 CONTAINER_TO_ID(container),
1052 CONTAINER_TO_LUN(container));
1053 }
1054
1055}
1056
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057/**
1058 * aac_command_thread - command processing thread
1059 * @dev: Adapter to monitor
1060 *
1061 * Waits on the commandready event in it's queue. When the event gets set
1062 * it will pull FIBs off it's queue. It will continue to pull FIBs off
1063 * until the queue is empty. When the queue is empty it will wait for
1064 * more FIBs.
1065 */
1066
1067int aac_command_thread(struct aac_dev * dev)
1068{
1069 struct hw_fib *hw_fib, *hw_newfib;
1070 struct fib *fib, *newfib;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 struct aac_fib_context *fibctx;
1072 unsigned long flags;
1073 DECLARE_WAITQUEUE(wait, current);
1074
1075 /*
1076 * We can only have one thread per adapter for AIF's.
1077 */
1078 if (dev->aif_thread)
1079 return -EINVAL;
1080 /*
1081 * Set up the name that will appear in 'ps'
1082 * stored in task_struct.comm[16].
1083 */
1084 daemonize("aacraid");
1085 allow_signal(SIGKILL);
1086 /*
1087 * Let the DPC know it has a place to send the AIF's to.
1088 */
1089 dev->aif_thread = 1;
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001090 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091 set_current_state(TASK_INTERRUPTIBLE);
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001092 dprintk ((KERN_INFO "aac_command_thread start\n"));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 while(1)
1094 {
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001095 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1096 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097 struct list_head *entry;
1098 struct aac_aifcmd * aifcmd;
1099
1100 set_current_state(TASK_RUNNING);
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001101
1102 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103 list_del(entry);
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001104
1105 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106 fib = list_entry(entry, struct fib, fiblink);
1107 /*
1108 * We will process the FIB here or pass it to a
1109 * worker thread that is TBD. We Really can't
1110 * do anything at this point since we don't have
1111 * anything defined for this thread to do.
1112 */
1113 hw_fib = fib->hw_fib;
1114 memset(fib, 0, sizeof(struct fib));
1115 fib->type = FSAFS_NTC_FIB_CONTEXT;
1116 fib->size = sizeof( struct fib );
1117 fib->hw_fib = hw_fib;
1118 fib->data = hw_fib->data;
1119 fib->dev = dev;
1120 /*
1121 * We only handle AifRequest fibs from the adapter.
1122 */
1123 aifcmd = (struct aac_aifcmd *) hw_fib->data;
1124 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
1125 /* Handle Driver Notify Events */
Mark Haverkamp131256c2005-09-26 13:04:56 -07001126 aac_handle_aif(dev, fib);
Mark Haverkamp 56b58712005-04-27 06:05:51 -07001127 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1128 fib_adapter_complete(fib, (u16)sizeof(u32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 } else {
1130 struct list_head *entry;
1131 /* The u32 here is important and intended. We are using
1132 32bit wrapping time to fit the adapter field */
1133
1134 u32 time_now, time_last;
1135 unsigned long flagv;
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001136 unsigned num;
1137 struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1138 struct fib ** fib_pool, ** fib_p;
Mark Haverkamp131256c2005-09-26 13:04:56 -07001139
1140 /* Sniff events */
1141 if ((aifcmd->command ==
1142 cpu_to_le32(AifCmdEventNotify)) ||
1143 (aifcmd->command ==
1144 cpu_to_le32(AifCmdJobProgress))) {
1145 aac_handle_aif(dev, fib);
1146 }
1147
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 time_now = jiffies/HZ;
1149
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001150 /*
1151 * Warning: no sleep allowed while
1152 * holding spinlock. We take the estimate
1153 * and pre-allocate a set of fibs outside the
1154 * lock.
1155 */
1156 num = le32_to_cpu(dev->init->AdapterFibsSize)
1157 / sizeof(struct hw_fib); /* some extra */
1158 spin_lock_irqsave(&dev->fib_lock, flagv);
1159 entry = dev->fib_list.next;
1160 while (entry != &dev->fib_list) {
1161 entry = entry->next;
1162 ++num;
1163 }
1164 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1165 hw_fib_pool = NULL;
1166 fib_pool = NULL;
1167 if (num
1168 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1169 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1170 hw_fib_p = hw_fib_pool;
1171 fib_p = fib_pool;
1172 while (hw_fib_p < &hw_fib_pool[num]) {
1173 if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1174 --hw_fib_p;
1175 break;
1176 }
1177 if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1178 kfree(*(--hw_fib_p));
1179 break;
1180 }
1181 }
1182 if ((num = hw_fib_p - hw_fib_pool) == 0) {
1183 kfree(fib_pool);
1184 fib_pool = NULL;
1185 kfree(hw_fib_pool);
1186 hw_fib_pool = NULL;
1187 }
1188 } else if (hw_fib_pool) {
1189 kfree(hw_fib_pool);
1190 hw_fib_pool = NULL;
1191 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 spin_lock_irqsave(&dev->fib_lock, flagv);
1193 entry = dev->fib_list.next;
1194 /*
1195 * For each Context that is on the
1196 * fibctxList, make a copy of the
1197 * fib, and then set the event to wake up the
1198 * thread that is waiting for it.
1199 */
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001200 hw_fib_p = hw_fib_pool;
1201 fib_p = fib_pool;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 while (entry != &dev->fib_list) {
1203 /*
1204 * Extract the fibctx
1205 */
1206 fibctx = list_entry(entry, struct aac_fib_context, next);
1207 /*
1208 * Check if the queue is getting
1209 * backlogged
1210 */
1211 if (fibctx->count > 20)
1212 {
1213 /*
1214 * It's *not* jiffies folks,
1215 * but jiffies / HZ so do not
1216 * panic ...
1217 */
1218 time_last = fibctx->jiffies;
1219 /*
1220 * Has it been > 2 minutes
1221 * since the last read off
1222 * the queue?
1223 */
1224 if ((time_now - time_last) > 120) {
1225 entry = entry->next;
1226 aac_close_fib_context(dev, fibctx);
1227 continue;
1228 }
1229 }
1230 /*
1231 * Warning: no sleep allowed while
1232 * holding spinlock
1233 */
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001234 if (hw_fib_p < &hw_fib_pool[num]) {
1235 hw_newfib = *hw_fib_p;
1236 *(hw_fib_p++) = NULL;
1237 newfib = *fib_p;
1238 *(fib_p++) = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 /*
1240 * Make the copy of the FIB
1241 */
1242 memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1243 memcpy(newfib, fib, sizeof(struct fib));
1244 newfib->hw_fib = hw_newfib;
1245 /*
1246 * Put the FIB onto the
1247 * fibctx's fibs
1248 */
1249 list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1250 fibctx->count++;
1251 /*
1252 * Set the event to wake up the
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001253 * thread that is waiting.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 */
1255 up(&fibctx->wait_sem);
1256 } else {
1257 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 }
1259 entry = entry->next;
1260 }
1261 /*
1262 * Set the status of this FIB
1263 */
Mark Haverkamp 56b58712005-04-27 06:05:51 -07001264 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 fib_adapter_complete(fib, sizeof(u32));
1266 spin_unlock_irqrestore(&dev->fib_lock, flagv);
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001267 /* Free up the remaining resources */
1268 hw_fib_p = hw_fib_pool;
1269 fib_p = fib_pool;
1270 while (hw_fib_p < &hw_fib_pool[num]) {
1271 if (*hw_fib_p)
1272 kfree(*hw_fib_p);
1273 if (*fib_p)
1274 kfree(*fib_p);
1275 ++fib_p;
1276 ++hw_fib_p;
1277 }
1278 if (hw_fib_pool)
1279 kfree(hw_fib_pool);
1280 if (fib_pool)
1281 kfree(fib_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 kfree(fib);
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001284 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 }
1286 /*
1287 * There are no more AIF's
1288 */
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001289 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 schedule();
1291
1292 if(signal_pending(current))
1293 break;
1294 set_current_state(TASK_INTERRUPTIBLE);
1295 }
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001296 if (dev->queues)
1297 remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 dev->aif_thread = 0;
1299 complete_and_exit(&dev->aif_completion, 0);
Mark Haverkamp2f1309802005-09-26 13:02:15 -07001300 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301}