/*
 * Inline routines shareable across OS platforms.
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2000-2001 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#43 $
 *
 * $FreeBSD$
 */

#ifndef _AIC7XXX_INLINE_H_
#define _AIC7XXX_INLINE_H_

/************************* Sequencer Execution Control ************************/
static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc);
static __inline int  ahc_is_paused(struct ahc_softc *ahc);
static __inline void ahc_pause(struct ahc_softc *ahc);
static __inline void ahc_unpause(struct ahc_softc *ahc);

/*
 * Work around any chip bugs related to halting sequencer execution.
 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
 * reading a register that will set this signal and deassert it.
 * Without this workaround, if the chip is paused (by an interrupt or
 * a manual pause) while accessing SCB RAM, accesses to certain
 * registers will hang the system (infinite PCI retries).
 */
static __inline void
ahc_pause_bug_fix(struct ahc_softc *ahc)
{
        if ((ahc->features & AHC_ULTRA2) != 0)
                (void)ahc_inb(ahc, CCSCBCTL);
}

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
static __inline int
ahc_is_paused(struct ahc_softc *ahc)
{
        return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
static __inline void
ahc_pause(struct ahc_softc *ahc)
{
        ahc_outb(ahc, HCNTRL, ahc->pause);

        /*
         * Since the sequencer can disable pausing in a critical section, we
         * must loop until it actually stops.
         */
        while (ahc_is_paused(ahc) == 0)
                ;

        ahc_pause_bug_fix(ahc);
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
static __inline void
ahc_unpause(struct ahc_softc *ahc)
{
        if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
                ahc_outb(ahc, HCNTRL, ahc->unpause);
}

/*********************** Untagged Transaction Routines ************************/
static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc);

/*
 * Block our completion routine from starting the next untagged
 * transaction for this target or target lun.
 */
static __inline void
ahc_freeze_untagged_queues(struct ahc_softc *ahc)
{
        if ((ahc->flags & AHC_SCB_BTT) == 0)
                ahc->untagged_queue_lock++;
}

/*
 * Allow the next untagged transaction for this target or target lun
 * to be executed.  We use a counting semaphore to allow the lock
 * to be acquired recursively.  Once the count drops to zero, the
 * transaction queues will be run.
 */
static __inline void
ahc_release_untagged_queues(struct ahc_softc *ahc)
{
        if ((ahc->flags & AHC_SCB_BTT) == 0) {
                ahc->untagged_queue_lock--;
                if (ahc->untagged_queue_lock == 0)
                        ahc_run_untagged_queues(ahc);
        }
}

/************************** Memory mapping routines ***************************/
static __inline struct ahc_dma_seg *
        ahc_sg_bus_to_virt(struct scb *scb,
                           uint32_t sg_busaddr);
static __inline uint32_t
        ahc_sg_virt_to_bus(struct scb *scb,
                           struct ahc_dma_seg *sg);
static __inline uint32_t
        ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
static __inline void ahc_sync_scb(struct ahc_softc *ahc,
                                  struct scb *scb, int op);
static __inline void ahc_sync_sglist(struct ahc_softc *ahc,
                                     struct scb *scb, int op);
static __inline uint32_t
        ahc_targetcmd_offset(struct ahc_softc *ahc,
                             u_int index);

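/*
 * Translate an S/G list entry's bus (DMA) address into a pointer to the
 * corresponding entry in this SCB's in-core S/G list.
 */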
static __inline struct ahc_dma_seg *
ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
{
        int sg_index;

        sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
        /* sg_list_phys points to entry 1, not 0 */
        sg_index++;

        return (&scb->sg_list[sg_index]);
}

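/*
 * Translate a pointer into this SCB's in-core S/G list into the bus (DMA)
 * address the controller uses to fetch that entry.
 */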
static __inline uint32_t
ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
{
        int sg_index;

        /* sg_list_phys points to entry 1, not 0 */
        sg_index = sg - &scb->sg_list[1];

        return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
}

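/*
 * Return the bus address of the hardware SCB at the given index in the
 * contiguously allocated hardware SCB array.
 */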
static __inline uint32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
        return (ahc->scb_data->hscb_busaddr
                + (sizeof(struct hardware_scb) * index));
}

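/*
 * Synchronize the DMA mapping covering this SCB's hardware SCB so the
 * host and the controller see a consistent copy.
 */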
static __inline void
ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
{
        ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
                        ahc->scb_data->hscb_dmamap,
                        /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
                        /*len*/sizeof(*scb->hscb), op);
}

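/*
 * Synchronize the DMA mapping covering this SCB's S/G list.  A no-op for
 * transactions that carry no S/G segments.
 */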
static __inline void
ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
{
        if (scb->sg_count == 0)
                return;

        ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
                        /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
                                * sizeof(struct ahc_dma_seg),
                        /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
}

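/*
 * Return the byte offset of the indexed target command relative to the
 * start of the shared qoutfifo/target command area.
 */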
static __inline uint32_t
ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
{
        return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
}

/******************************** Debugging ***********************************/
static __inline char *ahc_name(struct ahc_softc *ahc);

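/* Return the name used to identify this controller in diagnostic output. */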
static __inline char *
ahc_name(struct ahc_softc *ahc)
{
        return (ahc->name);
}

/*********************** Miscellaneous Support Functions **********************/

static __inline void ahc_update_residual(struct ahc_softc *ahc,
                                         struct scb *scb);
static __inline struct ahc_initiator_tinfo *
        ahc_fetch_transinfo(struct ahc_softc *ahc,
                            char channel, u_int our_id,
                            u_int remote_id,
                            struct ahc_tmode_tstate **tstate);
static __inline uint16_t
        ahc_inw(struct ahc_softc *ahc, u_int port);
static __inline void ahc_outw(struct ahc_softc *ahc, u_int port,
                              u_int value);
static __inline uint32_t
        ahc_inl(struct ahc_softc *ahc, u_int port);
static __inline void ahc_outl(struct ahc_softc *ahc, u_int port,
                              uint32_t value);
static __inline uint64_t
        ahc_inq(struct ahc_softc *ahc, u_int port);
static __inline void ahc_outq(struct ahc_softc *ahc, u_int port,
                              uint64_t value);
static __inline struct scb*
        ahc_get_scb(struct ahc_softc *ahc);
static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc,
                                             struct scb *scb);
static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline struct scsi_sense_data *
        ahc_get_sense_buf(struct ahc_softc *ahc,
                          struct scb *scb);
static __inline uint32_t
        ahc_get_sense_bufaddr(struct ahc_softc *ahc,
                              struct scb *scb);

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static __inline void
ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
{
        uint32_t sgptr;

        sgptr = ahc_le32toh(scb->hscb->sgptr);
        if ((sgptr & SG_RESID_VALID) != 0)
                ahc_calc_residual(ahc, scb);
}

/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
static __inline struct ahc_initiator_tinfo *
ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
                    u_int remote_id, struct ahc_tmode_tstate **tstate)
{
        /*
         * Transfer data structures are stored from the perspective
         * of the target role.  Since the parameters for a connection
         * in the initiator role to a given target are the same as
         * when the roles are reversed, we pretend we are the target.
         */
        if (channel == 'B')
                our_id += 8;
        *tstate = ahc->enabled_targets[our_id];
        return (&(*tstate)->transinfo[remote_id]);
}

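/*
 * Multi-byte register access routines.  These are built from single byte
 * ahc_inb()/ahc_outb() accesses and assemble/split values least
 * significant byte first.
 */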
static __inline uint16_t
ahc_inw(struct ahc_softc *ahc, u_int port)
{
        return ((ahc_inb(ahc, port+1) << 8) | ahc_inb(ahc, port));
}

static __inline void
ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
{
        ahc_outb(ahc, port, value & 0xFF);
        ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
}

static __inline uint32_t
ahc_inl(struct ahc_softc *ahc, u_int port)
{
        return ((ahc_inb(ahc, port))
              | (ahc_inb(ahc, port+1) << 8)
              | (ahc_inb(ahc, port+2) << 16)
              | (ahc_inb(ahc, port+3) << 24));
}

static __inline void
ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
{
        ahc_outb(ahc, port, (value) & 0xFF);
        ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
        ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
        ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
}

static __inline uint64_t
ahc_inq(struct ahc_softc *ahc, u_int port)
{
        return ((ahc_inb(ahc, port))
              | (ahc_inb(ahc, port+1) << 8)
              | (ahc_inb(ahc, port+2) << 16)
              | (ahc_inb(ahc, port+3) << 24)
              | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
              | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
              | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
              | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
}

static __inline void
ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
{
        ahc_outb(ahc, port, value & 0xFF);
        ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
        ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
        ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
        ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
        ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
        ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
        ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
}

/*
 * Get a free scb.  If there are none, see if we can allocate a new SCB.
 */
static __inline struct scb *
ahc_get_scb(struct ahc_softc *ahc)
{
        struct scb *scb;

        if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
                ahc_alloc_scbs(ahc);
                scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
                if (scb == NULL)
                        return (NULL);
        }
        SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
        return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
static __inline void
ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
{
        struct hardware_scb *hscb;

        hscb = scb->hscb;
        /* Clean up for the next user */
        ahc->scb_data->scbindex[hscb->tag] = NULL;
        scb->flags = SCB_FREE;
        hscb->control = 0;

        SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);

        /* Notify the OSM that a resource is now available. */
        ahc_platform_scb_free(ahc, scb);
}

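/*
 * Look up the active SCB associated with the given tag and synchronize
 * its hardware SCB DMA mapping before the caller inspects it.
 */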
static __inline struct scb *
ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
{
        struct scb* scb;

        scb = ahc->scb_data->scbindex[tag];
        if (scb != NULL)
                ahc_sync_scb(ahc, scb,
                             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
        return (scb);
}

static __inline void
ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
{
        struct hardware_scb *q_hscb;
        u_int  saved_tag;

        /*
         * Our queuing method is a bit tricky.  The card
         * knows in advance which HSCB to download, and we
         * can't disappoint it.  To achieve this, the next
         * SCB to download is saved off in ahc->next_queued_scb.
         * When we are called to queue "an arbitrary scb",
         * we copy the contents of the incoming HSCB to the one
         * the sequencer knows about, swap HSCB pointers and
         * finally assign the SCB to the tag indexed location
         * in the scb_array.  This makes sure that we can still
         * locate the correct SCB by SCB_TAG.
         */
        q_hscb = ahc->next_queued_scb->hscb;
        saved_tag = q_hscb->tag;
        memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
        if ((scb->flags & SCB_CDB32_PTR) != 0) {
                q_hscb->shared_data.cdb_ptr =
                    ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
                              + offsetof(struct hardware_scb, cdb32));
        }
        q_hscb->tag = saved_tag;
        q_hscb->next = scb->hscb->tag;

        /* Now swap HSCB pointers. */
        ahc->next_queued_scb->hscb = scb->hscb;
        scb->hscb = q_hscb;

        /* Now define the mapping from tag to SCB in the scbindex */
        ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}

/*
 * Tell the sequencer about a new transaction to execute.
 */
static __inline void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
        ahc_swap_with_next_hscb(ahc, scb);

        if (scb->hscb->tag == SCB_LIST_NULL
         || scb->hscb->next == SCB_LIST_NULL)
                panic("Attempt to queue invalid SCB tag %x:%x\n",
                      scb->hscb->tag, scb->hscb->next);

        /*
         * Setup data "oddness".
         */
        scb->hscb->lun &= LID;
        if (ahc_get_transfer_length(scb) & 0x1)
                scb->hscb->lun |= SCB_XFERLEN_ODD;

        /*
         * Keep a history of SCBs we've downloaded in the qinfifo.
         */
        ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

        /*
         * Make sure our data is consistent from the
         * perspective of the adapter.
         */
        ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        /* Tell the adapter about the newly queued SCB */
        if ((ahc->features & AHC_QUEUE_REGS) != 0) {
                ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
        } else {
                if ((ahc->features & AHC_AUTOPAUSE) == 0)
                        ahc_pause(ahc);
                ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
                if ((ahc->features & AHC_AUTOPAUSE) == 0)
                        ahc_unpause(ahc);
        }
}

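/*
 * Return a pointer to the sense buffer reserved for this SCB in the
 * driver's preallocated sense data array.
 */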
static __inline struct scsi_sense_data *
ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
{
        int offset;

        offset = scb - ahc->scb_data->scbarray;
        return (&ahc->scb_data->sense[offset]);
}

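/*
 * Return the bus (DMA) address of the sense buffer reserved for this SCB.
 */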
static __inline uint32_t
ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
{
        int offset;

        offset = scb - ahc->scb_data->scbarray;
        return (ahc->scb_data->sense_busaddr
                + (offset * sizeof(struct scsi_sense_data)));
}

/************************** Interrupt Processing ******************************/
static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
static __inline int ahc_intr(struct ahc_softc *ahc);

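/*
 * Synchronize the DMA mapping covering the 256 byte qoutfifo region at
 * the start of the shared data area.
 */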
static __inline void
ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
{
        ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
                        /*offset*/0, /*len*/256, op);
}

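/*
 * Synchronize the DMA mapping covering the target mode incoming command
 * fifo.  Only relevant when the controller is acting in the target role.
 */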
static __inline void
ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
{
#ifdef AHC_TARGET_MODE
        if ((ahc->flags & AHC_TARGETROLE) != 0) {
                ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
                                ahc->shared_data_dmamap,
                                ahc_targetcmd_offset(ahc, 0),
                                sizeof(struct target_cmd) * AHC_TMODE_CMDS,
                                op);
        }
#endif
}

/*
 * See if the firmware has posted any completed commands
 * into our in-core command complete fifos.
 */
#define AHC_RUN_QOUTFIFO 0x1
#define AHC_RUN_TQINFIFO 0x2
static __inline u_int
ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
{
        u_int retval;

        retval = 0;
        ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
                        /*offset*/ahc->qoutfifonext, /*len*/1,
                        BUS_DMASYNC_POSTREAD);
        if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
                retval |= AHC_RUN_QOUTFIFO;
#ifdef AHC_TARGET_MODE
        if ((ahc->flags & AHC_TARGETROLE) != 0
         && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
                ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
                                ahc->shared_data_dmamap,
                                ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
                                /*len*/sizeof(struct target_cmd),
                                BUS_DMASYNC_POSTREAD);
                if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
                        retval |= AHC_RUN_TQINFIFO;
        }
#endif
        return (retval);
}

/*
 * Catch an interrupt from the adapter
 */
static __inline int
ahc_intr(struct ahc_softc *ahc)
{
        u_int intstat;

        if ((ahc->pause & INTEN) == 0) {
                /*
                 * Our interrupt is not enabled on the chip
                 * and may be disabled for re-entrancy reasons,
                 * so just return.  This is likely just a shared
                 * interrupt.
                 */
                return (0);
        }
        /*
         * Instead of directly reading the interrupt status register,
         * infer the cause of the interrupt by checking our in-core
         * completion queues.  This avoids a costly PCI bus read in
         * most cases.
         */
        if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
         && (ahc_check_cmdcmpltqueues(ahc) != 0))
                intstat = CMDCMPLT;
        else {
                intstat = ahc_inb(ahc, INTSTAT);
        }

        if ((intstat & INT_PEND) == 0) {
#if AHC_PCI_CONFIG > 0
                if (ahc->unsolicited_ints > 500) {
                        ahc->unsolicited_ints = 0;
                        if ((ahc->chip & AHC_PCI) != 0
                         && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
                                ahc->bus_intr(ahc);
                }
#endif
                ahc->unsolicited_ints++;
                return (0);
        }
        ahc->unsolicited_ints = 0;

        if (intstat & CMDCMPLT) {
                ahc_outb(ahc, CLRINT, CLRCMDINT);

                /*
                 * Ensure that the chip sees that we've cleared
                 * this interrupt before we walk the output fifo.
                 * Otherwise, we may, due to posted bus writes,
                 * clear the interrupt after we finish the scan,
                 * and after the sequencer has added new entries
                 * and asserted the interrupt again.
                 */
                ahc_flush_device_writes(ahc);
                ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
                if ((ahc->flags & AHC_TARGETROLE) != 0)
                        ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
        }

        /*
         * Handle statuses that may invalidate our cached
         * copy of INTSTAT separately.
         */
        if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
                /* Hot eject.  Do nothing */
        } else if (intstat & BRKADRINT) {
                ahc_handle_brkadrint(ahc);
        } else if ((intstat & (SEQINT|SCSIINT)) != 0) {

                ahc_pause_bug_fix(ahc);

                if ((intstat & SEQINT) != 0)
                        ahc_handle_seqint(ahc, intstat);

                if ((intstat & SCSIINT) != 0)
                        ahc_handle_scsiint(ahc, intstat);
        }
        return (1);
}

#endif /* _AIC7XXX_INLINE_H_ */