      1 /*	$NetBSD: aic79xx_inline.h,v 1.4 2003/07/26 06:15:57 thorpej Exp $	*/
      2 
      3 /*
      4  * Inline routines shareable across OS platforms.
      5  *
      6  * Copyright (c) 1994-2001 Justin T. Gibbs.
      7  * Copyright (c) 2000-2003 Adaptec Inc.
      8  * All rights reserved.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions, and the following disclaimer,
     15  *    without modification.
     16  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
     17  *    substantially similar to the "NO WARRANTY" disclaimer below
     18  *    ("Disclaimer") and any redistribution must be conditioned upon
     19  *    including a substantially similar Disclaimer requirement for further
     20  *    binary redistribution.
     21  * 3. Neither the names of the above-listed copyright holders nor the names
     22  *    of any contributors may be used to endorse or promote products derived
     23  *    from this software without specific prior written permission.
     24  *
     25  * Alternatively, this software may be distributed under the terms of the
     26  * GNU General Public License ("GPL") version 2 as published by the Free
     27  * Software Foundation.
     28  *
     29  * NO WARRANTY
     30  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     31  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     32  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
     33  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     34  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     38  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     39  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     40  * POSSIBILITY OF SUCH DAMAGES.
     41  *
     42  * //depot/aic7xxx/aic7xxx/aic79xx_inline.h#44 $
     43  *
     44  * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_inline.h,v 1.8 2003/03/06 23:58:34 gibbs Exp $
     45  */
     46 /*
     47  * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
     48  */
     49 
     50 #ifndef _AIC79XX_INLINE_H_
     51 #define _AIC79XX_INLINE_H_
     52 
     53 /******************************** Debugging ***********************************/
     54 static __inline char *ahd_name(struct ahd_softc *);
     55 
     56 static __inline char *
     57 ahd_name(struct ahd_softc *ahd)
     58 {
     59 	return (ahd->name);
     60 }
     61 
     62 /************************ Sequencer Execution Control *************************/
     63 static __inline void ahd_known_modes(struct ahd_softc *, ahd_mode, ahd_mode);
     64 static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *,
     65     ahd_mode, ahd_mode);
     66 static __inline void ahd_extract_mode_state(struct ahd_softc *,
     67     ahd_mode_state, ahd_mode *, ahd_mode *);
     68 static __inline void ahd_set_modes(struct ahd_softc *, ahd_mode, ahd_mode);
     69 static __inline void ahd_update_modes(struct ahd_softc *);
     70 static __inline void ahd_assert_modes(struct ahd_softc *, ahd_mode,
     71     ahd_mode, const char *, int);
     72 static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *);
     73 static __inline void ahd_restore_modes(struct ahd_softc *, ahd_mode_state);
     74 static __inline int  ahd_is_paused(struct ahd_softc *);
     75 static __inline void ahd_pause(struct ahd_softc *);
     76 static __inline void ahd_unpause(struct ahd_softc *);
     77 
     78 static __inline void
     79 ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
     80 {
     81 	ahd->src_mode = src;
     82 	ahd->dst_mode = dst;
     83 	ahd->saved_src_mode = src;
     84 	ahd->saved_dst_mode = dst;
     85 }
     86 
     87 static __inline ahd_mode_state
     88 ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
     89 {
     90 	return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT));
     91 }
     92 
     93 static __inline void
     94 ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
     95 		       ahd_mode *src, ahd_mode *dst)
     96 {
     97 	*src = (state & SRC_MODE) >> SRC_MODE_SHIFT;
     98 	*dst = (state & DST_MODE) >> DST_MODE_SHIFT;
     99 }
    100 
    101 static __inline void
    102 ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
    103 {
    104 	if (ahd->src_mode == src && ahd->dst_mode == dst)
    105 		return;
    106 #ifdef AHD_DEBUG
    107 	if (ahd->src_mode == AHD_MODE_UNKNOWN
    108 	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
    109 		panic("Setting mode prior to saving it.\n");
    110 	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
    111 		printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
    112 		       ahd_build_mode_state(ahd, src, dst));
    113 #endif
    114 	ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
    115 	ahd->src_mode = src;
    116 	ahd->dst_mode = dst;
    117 }
    118 
    119 static __inline void
    120 ahd_update_modes(struct ahd_softc *ahd)
    121 {
    122 	ahd_mode_state mode_ptr;
    123 	ahd_mode src;
    124 	ahd_mode dst;
    125 
    126 	mode_ptr = ahd_inb(ahd, MODE_PTR);
    127 #ifdef AHD_DEBUG
    128 	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
    129 		printf("Reading mode 0x%x\n", mode_ptr);
    130 #endif
    131 	ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
    132 	ahd_known_modes(ahd, src, dst);
    133 }
    134 
    135 static __inline void
    136 ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
    137 		 ahd_mode dstmode, const char *file, int line)
    138 {
    139 #ifdef AHD_DEBUG
    140 	if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
    141 	 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
    142 		panic("%s:%s:%d: Mode assertion failed.\n",
    143 		       ahd_name(ahd), file, line);
    144 	}
    145 #endif
    146 }
    147 
    148 static __inline ahd_mode_state
    149 ahd_save_modes(struct ahd_softc *ahd)
    150 {
    151 	if (ahd->src_mode == AHD_MODE_UNKNOWN
    152 	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
    153 		ahd_update_modes(ahd);
    154 
    155 	return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
    156 }
    157 
    158 static __inline void
    159 ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
    160 {
    161 	ahd_mode src;
    162 	ahd_mode dst;
    163 
    164 	ahd_extract_mode_state(ahd, state, &src, &dst);
    165 	ahd_set_modes(ahd, src, dst);
    166 }
    167 
    168 #define AHD_ASSERT_MODES(ahd, source, dest) \
    169 	ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
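
         /*
          * Illustrative usage only (not part of the driver): callers that
          * must temporarily switch register banks typically save, switch,
          * and restore, e.g. (AHD_MODE_SCSI is one of the ahd_mode values
          * defined in the driver's other headers):
          *
          *	ahd_mode_state saved_modes;
          *
          *	saved_modes = ahd_save_modes(ahd);
          *	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
          *	... access mode-dependent registers ...
          *	ahd_restore_modes(ahd, saved_modes);
          *
          * Under AHD_DEBUG, AHD_ASSERT_MODES() can precede mode-dependent
          * register accesses to catch mismatches early.
          */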
    170 
    171 /*
    172  * Determine whether the sequencer has halted code execution.
    173  * Returns non-zero status if the sequencer is stopped.
    174  */
    175 static __inline int
    176 ahd_is_paused(struct ahd_softc *ahd)
    177 {
    178 	return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
    179 }
    180 
    181 /*
    182  * Request that the sequencer stop and wait, indefinitely, for it
    183  * to stop.  The sequencer will only acknowledge that it is paused
    184  * once it has reached an instruction boundary and PAUSEDIS is
    185  * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
    186  * for critical sections.
    187  */
    188 static __inline void
    189 ahd_pause(struct ahd_softc *ahd)
    190 {
    191 	ahd_outb(ahd, HCNTRL, ahd->pause);
    192 
    193 	/*
    194 	 * Since the sequencer can disable pausing in a critical section, we
    195 	 * must loop until it actually stops.
    196 	 */
    197 	while (ahd_is_paused(ahd) == 0)
    198 		;
    199 }
    200 
    201 /*
    202  * Allow the sequencer to continue program execution.
    203  * We check here to ensure that no additional interrupt
    204  * sources that would cause the sequencer to halt have been
    205  * asserted.  If, for example, a SCSI bus reset is detected
    206  * while we are fielding a different, pausing, interrupt type,
    207  * we don't want to release the sequencer before going back
    208  * into our interrupt handler and dealing with this new
    209  * condition.
    210  */
    211 static __inline void
    212 ahd_unpause(struct ahd_softc *ahd)
    213 {
    214 	/*
    215 	 * Automatically restore our modes to those saved
    216 	 * prior to the first change of the mode.
    217 	 */
    218 	if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
    219 	 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
    220 		if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
    221 			ahd_reset_cmds_pending(ahd);
    222 		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
    223 	}
    224 
    225 	if ((ahd_inb(ahd, INTSTAT) & ~(SWTMINT | CMDCMPLT)) == 0)
    226 		ahd_outb(ahd, HCNTRL, ahd->unpause);
    227 
    228 	ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
    229 }
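
         /*
          * Illustrative usage only: host code that needs to inspect or
          * modify sequencer-owned state brackets the access with a pause,
          * e.g. (a sketch, not a fixed recipe):
          *
          *	ahd_pause(ahd);
          *	... read or rewrite sequencer registers/SCB RAM ...
          *	ahd_unpause(ahd);
          *
          * ahd_unpause() re-checks INTSTAT above so that a pausing
          * interrupt which arrived in the meantime is not released by
          * accident.
          */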
    230 
    231 /*********************** Scatter Gather List Handling *************************/
    232 static __inline void	*ahd_sg_setup(struct ahd_softc *, struct scb *,
    233 			    void *, bus_addr_t, bus_size_t, int);
    234 static __inline void	 ahd_setup_scb_common(struct ahd_softc *, struct scb *);
    235 static __inline void	 ahd_setup_data_scb(struct ahd_softc *, struct scb *);
    236 static __inline void	 ahd_setup_noxfer_scb(struct ahd_softc *, struct scb *);
    237 
    238 static __inline void *
    239 ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
    240 	     void *sgptr, bus_addr_t addr, bus_size_t len, int last)
    241 {
    242 	scb->sg_count++;
    243 	if (sizeof(bus_addr_t) > 4
    244 	 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
    245 		struct ahd_dma64_seg *sg;
    246 
    247 		sg = (struct ahd_dma64_seg *)sgptr;
    248 		sg->addr = ahd_htole64(addr);
    249 		sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
    250 		return (sg + 1);
    251 	} else {
    252 		struct ahd_dma_seg *sg;
    253 
    254 		sg = (struct ahd_dma_seg *)sgptr;
    255 		sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
    256 		sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
    257 				    | (last ? AHD_DMA_LAST_SEG : 0));
    258 		return (sg + 1);
    259 	}
    260 }
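
         /*
          * Illustrative usage only: a caller walking a mapped DMA segment
          * list would chain ahd_sg_setup() calls and mark the final entry,
          * e.g. (sketch; "dm_segs" and "nsegs" are hypothetical locals
          * holding NetBSD bus_dma segments):
          *
          *	void *sg = scb->sg_list;
          *	int i;
          *
          *	for (i = 0; i < nsegs; i++)
          *		sg = ahd_sg_setup(ahd, scb, sg, dm_segs[i].ds_addr,
          *				  dm_segs[i].ds_len, i == nsegs - 1);
          *
          * Each call advances the cursor by one hardware SG element and
          * increments scb->sg_count.
          */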
    261 
    262 static __inline void
    263 ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
    264 {
    265 	/* XXX Handle target mode SCBs. */
    266 	scb->crc_retry_count = 0;
    267 	if ((scb->flags & SCB_PACKETIZED) != 0) {
    268 		/* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
     269 		scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
     270 		/*
     271 		 * Short-LUN workaround for Rev A hardware.
     272 		 */
    273 		memset(scb->hscb->pkt_long_lun, 0, sizeof(scb->hscb->pkt_long_lun));
    274 		scb->hscb->pkt_long_lun[6] = scb->hscb->lun;
    275 	}
    276 
    277 	if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
    278 	 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
    279 		scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
    280 		    ahd_htole32(scb->sense_busaddr);
    281 }
    282 
    283 static __inline void
    284 ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
    285 {
    286 	/*
     287 	 * Copy the first SG into the "current" data pointer area.
    288 	 */
    289 	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
    290 		struct ahd_dma64_seg *sg;
    291 
    292 		sg = (struct ahd_dma64_seg *)scb->sg_list;
    293 		scb->hscb->dataptr = sg->addr;
    294 		scb->hscb->datacnt = sg->len;
    295 	} else {
    296 		struct ahd_dma_seg *sg;
    297 
    298 		sg = (struct ahd_dma_seg *)scb->sg_list;
    299 		scb->hscb->dataptr = sg->addr;
    300 		if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
    301 			uint64_t high_addr;
    302 
    303 			high_addr = ahd_le32toh(sg->len) & 0x7F000000;
    304 			scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
    305 		}
    306 		scb->hscb->datacnt = sg->len;
    307 	}
    308 	/*
    309 	 * Note where to find the SG entries in bus space.
    310 	 * We also set the full residual flag which the
    311 	 * sequencer will clear as soon as a data transfer
    312 	 * occurs.
    313 	 */
    314 	scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
    315 }
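
         /*
          * Note on the 39-bit case above: ahd_sg_setup() stores bits 38:32
          * of the segment address in bits 30:24 of sg->len (the
          * "(addr >> 8) & 0x7F000000" term), so shifting that field left
          * by 8 returns those bits to positions 38:32 of the 64-bit
          * dataptr.
          */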
    316 
    317 static __inline void
    318 ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
    319 {
    320 	scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
    321 	scb->hscb->dataptr = 0;
    322 	scb->hscb->datacnt = 0;
    323 }
    324 
    325 /************************** Memory mapping routines ***************************/
    326 static __inline size_t	ahd_sg_size(struct ahd_softc *);
    327 static __inline void *
    328 			ahd_sg_bus_to_virt(struct ahd_softc *, struct scb *,
    329 			    uint32_t);
    330 static __inline uint32_t
    331 			ahd_sg_virt_to_bus(struct ahd_softc *, struct scb *,
    332 			    void *);
    333 static __inline void	ahd_sync_scb(struct ahd_softc *, struct scb *, int);
    334 static __inline void	ahd_sync_sglist(struct ahd_softc *, struct scb *, int);
    335 static __inline void	ahd_sync_sense(struct ahd_softc *, struct scb *, int);
    336 static __inline uint32_t
    337 			ahd_targetcmd_offset(struct ahd_softc *, u_int);
    338 
    339 static __inline size_t
    340 ahd_sg_size(struct ahd_softc *ahd)
    341 {
    342 	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
    343 		return (sizeof(struct ahd_dma64_seg));
    344 	return (sizeof(struct ahd_dma_seg));
    345 }
    346 
    347 static __inline void *
    348 ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
    349 {
    350 	bus_addr_t sg_offset;
    351 
     352 	/* sg_list_busaddr points to entry 1, not 0 */
    353 	sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
    354 	return ((uint8_t *)scb->sg_list + sg_offset);
    355 }
    356 
    357 static __inline uint32_t
    358 ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
    359 {
    360 	bus_addr_t sg_offset;
    361 
     362 	/* sg_list_busaddr points to entry 1, not 0 */
    363 	sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
    364 		  - ahd_sg_size(ahd);
    365 
    366 	return (scb->sg_list_busaddr + sg_offset);
    367 }
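
         /*
          * Illustrative note: the two conversions above are inverses over
          * an SCB's SG array, so for any element "sg" inside scb->sg_list:
          *
          *	ahd_sg_bus_to_virt(ahd, scb,
          *	    ahd_sg_virt_to_bus(ahd, scb, sg)) == sg
          *
          * Both account for sg_list_busaddr referring to element 1 rather
          * than element 0.
          */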
    368 
    369 static __inline void
    370 ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
    371 {
    372 	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->hscb_map->dmamap,
    373 			/*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
    374 			/*len*/sizeof(*scb->hscb), op);
    375 }
    376 
    377 static __inline void
    378 ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
    379 {
    380 	if (scb->sg_count == 0)
    381 		return;
    382 
    383 	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->sg_map->dmamap,
    384 			/*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
    385 			/*len*/ahd_sg_size(ahd) * scb->sg_count, op);
    386 }
    387 
    388 static __inline void
    389 ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
    390 {
    391 	ahd_dmamap_sync(ahd, ahd->parent_dmat,
    392 			scb->sense_map->dmamap,
    393 			/*offset*/scb->sense_busaddr,
    394 			/*len*/AHD_SENSE_BUFSIZE, op);
    395 }
    396 
    397 static __inline uint32_t
    398 ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
    399 {
    400 	return (((uint8_t *)&ahd->targetcmds[index])
    401 	       - (uint8_t *)ahd->qoutfifo);
    402 }
    403 
     404 /*********************** Miscellaneous Support Functions **********************/
    405 static __inline void	ahd_complete_scb(struct ahd_softc *, struct scb *);
    406 static __inline void	ahd_update_residual(struct ahd_softc *, struct scb *);
    407 static __inline struct ahd_initiator_tinfo *
    408 			ahd_fetch_transinfo(struct ahd_softc *, char, u_int,
    409 			    u_int, struct ahd_tmode_tstate **);
    410 static __inline uint16_t
    411 			ahd_inw(struct ahd_softc *, u_int);
    412 static __inline void	ahd_outw(struct ahd_softc *, u_int, u_int);
    413 static __inline uint32_t
    414 			ahd_inl(struct ahd_softc *, u_int);
    415 static __inline void	ahd_outl(struct ahd_softc *, u_int, uint32_t);
    416 static __inline uint64_t
    417 			ahd_inq(struct ahd_softc *, u_int);
    418 static __inline void	ahd_outq(struct ahd_softc *, u_int, uint64_t);
    419 static __inline u_int	ahd_get_scbptr(struct ahd_softc *);
    420 static __inline void	ahd_set_scbptr(struct ahd_softc *, u_int);
    421 static __inline u_int	ahd_get_hnscb_qoff(struct ahd_softc *);
    422 static __inline void	ahd_set_hnscb_qoff(struct ahd_softc *, u_int);
    423 static __inline u_int	ahd_get_hescb_qoff(struct ahd_softc *);
    424 static __inline void	ahd_set_hescb_qoff(struct ahd_softc *, u_int);
    425 static __inline u_int	ahd_get_snscb_qoff(struct ahd_softc *);
    426 static __inline void	ahd_set_snscb_qoff(struct ahd_softc *, u_int);
    427 static __inline u_int	ahd_get_sescb_qoff(struct ahd_softc *);
    428 static __inline void	ahd_set_sescb_qoff(struct ahd_softc *, u_int);
    429 static __inline u_int	ahd_get_sdscb_qoff(struct ahd_softc *);
    430 static __inline void	ahd_set_sdscb_qoff(struct ahd_softc *, u_int);
    431 static __inline u_int	ahd_inb_scbram(struct ahd_softc *, u_int);
    432 static __inline u_int	ahd_inw_scbram(struct ahd_softc *, u_int);
    433 static __inline uint32_t
    434 			ahd_inl_scbram(struct ahd_softc *, u_int);
    435 static __inline void	ahd_swap_with_next_hscb(struct ahd_softc *,
    436 	struct scb *);
    437 static __inline void	ahd_queue_scb(struct ahd_softc *, struct scb *);
    438 static __inline uint8_t *
    439 			ahd_get_sense_buf(struct ahd_softc *, struct scb *);
    440 static __inline uint32_t
    441 			ahd_get_sense_bufaddr(struct ahd_softc *, struct scb *);
    442 static __inline void	ahd_post_scb(struct ahd_softc *, struct scb *);
    443 
    444 
    445 static __inline void
    446 ahd_post_scb(struct ahd_softc *ahd, struct scb *scb)
    447 {
    448 	uint32_t sgptr;
    449 
    450 	sgptr = ahd_le32toh(scb->hscb->sgptr);
    451 	if ((sgptr & SG_STATUS_VALID) != 0)
    452 		ahd_handle_scb_status(ahd, scb);
    453 	else
    454         	ahd_done(ahd, scb);
    455 }
    456 
    457 static __inline void
    458 ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
    459 {
    460 	uint32_t sgptr;
    461 
    462 	sgptr = ahd_le32toh(scb->hscb->sgptr);
    463 	if ((sgptr & SG_STATUS_VALID) != 0)
    464 		ahd_handle_scb_status(ahd, scb);
    465 	else
    466 		ahd_done(ahd, scb);
    467 }
    468 
    469 /*
    470  * Determine whether the sequencer reported a residual
    471  * for this SCB/transaction.
    472  */
    473 static __inline void
    474 ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
    475 {
    476 	uint32_t sgptr;
    477 
    478 	sgptr = ahd_le32toh(scb->hscb->sgptr);
    479 	if ((sgptr & SG_STATUS_VALID) != 0)
    480 		ahd_calc_residual(ahd, scb);
    481 }
    482 
    483 /*
    484  * Return pointers to the transfer negotiation information
    485  * for the specified our_id/remote_id pair.
    486  */
    487 static __inline struct ahd_initiator_tinfo *
    488 ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
    489 		    u_int remote_id, struct ahd_tmode_tstate **tstate)
    490 {
    491 	/*
    492 	 * Transfer data structures are stored from the perspective
    493 	 * of the target role.  Since the parameters for a connection
    494 	 * in the initiator role to a given target are the same as
    495 	 * when the roles are reversed, we pretend we are the target.
    496 	 */
    497 	if (channel == 'B')
    498 		our_id += 8;
    499 	*tstate = ahd->enabled_targets[our_id];
    500 	return (&(*tstate)->transinfo[remote_id]);
    501 }
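
         /*
          * Illustrative usage only: a typical negotiation lookup for the
          * target addressed by an SCB (sketch; SCSIID_TARGET() and
          * ahd->our_id come from the driver's other headers):
          *
          *	struct ahd_tmode_tstate *tstate;
          *	struct ahd_initiator_tinfo *tinfo;
          *
          *	tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
          *				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
          *				    &tstate);
          *
          * The 'B' channel offset is a holdover from the twin-channel
          * aic7xxx driver; an AIC79xx softc manages a single channel ('A').
          */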
    502 
    503 #define AHD_COPY_COL_IDX(dst, src)				\
    504 do {								\
    505 	dst->hscb->scsiid = src->hscb->scsiid;			\
    506 	dst->hscb->lun = src->hscb->lun;			\
    507 } while (0)
    508 
    509 static __inline uint16_t
    510 ahd_inw(struct ahd_softc *ahd, u_int port)
    511 {
    512 	return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port));
    513 }
    514 
    515 static __inline void
    516 ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
    517 {
    518 	ahd_outb(ahd, port, value & 0xFF);
    519 	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
    520 }
    521 
    522 static __inline uint32_t
    523 ahd_inl(struct ahd_softc *ahd, u_int port)
    524 {
    525 	return ((ahd_inb(ahd, port))
    526 	      | (ahd_inb(ahd, port+1) << 8)
    527 	      | (ahd_inb(ahd, port+2) << 16)
    528 	      | (ahd_inb(ahd, port+3) << 24));
    529 }
    530 
    531 static __inline void
    532 ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
    533 {
    534 	ahd_outb(ahd, port, (value) & 0xFF);
    535 	ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
    536 	ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
    537 	ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
    538 }
    539 
    540 static __inline uint64_t
    541 ahd_inq(struct ahd_softc *ahd, u_int port)
    542 {
    543 	return ((ahd_inb(ahd, port))
    544 	      | (ahd_inb(ahd, port+1) << 8)
    545 	      | (ahd_inb(ahd, port+2) << 16)
    546 	      | (ahd_inb(ahd, port+3) << 24)
    547 	      | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
    548 	      | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
    549 	      | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
    550 	      | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
    551 }
    552 
    553 static __inline void
    554 ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
    555 {
    556 	ahd_outb(ahd, port, value & 0xFF);
    557 	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
    558 	ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
    559 	ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
    560 	ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
    561 	ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
    562 	ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
    563 	ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
    564 }
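
         /*
          * Illustrative note: the accessors above synthesize 16/32/64-bit
          * operations from byte-wide ahd_inb()/ahd_outb() calls, least
          * significant byte at the lowest port address.  For example
          * (sketch):
          *
          *	ahd_outl(ahd, port, 0x11223344);
          *
          * writes 0x44, 0x33, 0x22 and 0x11 to port, port+1, port+2 and
          * port+3 respectively, so ahd_inl(ahd, port) reads back
          * 0x11223344 for registers without read/write side effects.
          */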
    565 
    566 static __inline u_int
    567 ahd_get_scbptr(struct ahd_softc *ahd)
    568 {
    569 	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
    570 			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
    571 	return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
    572 }
    573 
    574 static __inline void
    575 ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
    576 {
    577 	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
    578 			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
    579 	ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
    580 	ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
    581 }
    582 
    583 static __inline u_int
    584 ahd_get_hnscb_qoff(struct ahd_softc *ahd)
    585 {
    586 	return (ahd_inw_atomic(ahd, HNSCB_QOFF));
    587 }
    588 
    589 static __inline void
    590 ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
    591 {
    592 	ahd_outw_atomic(ahd, HNSCB_QOFF, value);
    593 }
    594 
    595 static __inline u_int
    596 ahd_get_hescb_qoff(struct ahd_softc *ahd)
    597 {
    598 	return (ahd_inb(ahd, HESCB_QOFF));
    599 }
    600 
    601 static __inline void
    602 ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
    603 {
    604 	ahd_outb(ahd, HESCB_QOFF, value);
    605 }
    606 
    607 static __inline u_int
    608 ahd_get_snscb_qoff(struct ahd_softc *ahd)
    609 {
    610 	u_int oldvalue;
    611 
    612 	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
    613 	oldvalue = ahd_inw(ahd, SNSCB_QOFF);
    614 	ahd_outw(ahd, SNSCB_QOFF, oldvalue);
    615 	return (oldvalue);
    616 }
    617 
    618 static __inline void
    619 ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
    620 {
    621 	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
    622 	ahd_outw(ahd, SNSCB_QOFF, value);
    623 }
    624 
    625 static __inline u_int
    626 ahd_get_sescb_qoff(struct ahd_softc *ahd)
    627 {
    628 	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
    629 	return (ahd_inb(ahd, SESCB_QOFF));
    630 }
    631 
    632 static __inline void
    633 ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
    634 {
    635 	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
    636 	ahd_outb(ahd, SESCB_QOFF, value);
    637 }
    638 
    639 static __inline u_int
    640 ahd_get_sdscb_qoff(struct ahd_softc *ahd)
    641 {
    642 	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
    643 	return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
    644 }
    645 
    646 static __inline void
    647 ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
    648 {
    649 	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
    650 	ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
    651 	ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
    652 }
    653 
    654 static __inline u_int
    655 ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
    656 {
    657 	u_int value;
    658 
    659 	/*
    660 	 * Workaround PCI-X Rev A. hardware bug.
    661 	 * After a host read of SCB memory, the chip
    662 	 * may become confused into thinking prefetch
    663 	 * was required.  This starts the discard timer
    664 	 * running and can cause an unexpected discard
     665 	 * timer interrupt.  The workaround is to read
    666 	 * a normal register prior to the exhaustion of
    667 	 * the discard timer.  The mode pointer register
    668 	 * has no side effects and so serves well for
    669 	 * this purpose.
    670 	 *
    671 	 * Razor #528
    672 	 */
    673 	value = ahd_inb(ahd, offset);
    674 	if ((ahd->flags & AHD_PCIX_SCBRAM_RD_BUG) != 0)
    675 		ahd_inb(ahd, MODE_PTR);
    676 	return (value);
    677 }
    678 
    679 static __inline u_int
    680 ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
    681 {
    682 	return (ahd_inb_scbram(ahd, offset)
    683 	      | (ahd_inb_scbram(ahd, offset+1) << 8));
    684 }
    685 
    686 static __inline uint32_t
    687 ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
    688 {
    689 	return (ahd_inb_scbram(ahd, offset)
    690 	      | (ahd_inb_scbram(ahd, offset+1) << 8)
    691 	      | (ahd_inb_scbram(ahd, offset+2) << 16)
    692 	      | (ahd_inb_scbram(ahd, offset+3) << 24));
    693 }
    694 
    695 static __inline struct scb *
    696 ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
    697 {
    698 	struct scb* scb;
    699 
    700 	if (tag >= AHD_SCB_MAX)
    701 		return (NULL);
    702 	scb = ahd->scb_data.scbindex[tag];
    703 	if (scb != NULL)
    704 		ahd_sync_scb(ahd, scb,
    705 			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    706 	return (scb);
    707 }
    708 
    709 static __inline void
    710 ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
    711 {
    712 	struct hardware_scb *q_hscb;
    713 	struct map_node *q_hscb_map;
    714 	uint32_t saved_hscb_busaddr;
    715 
    716 	/*
    717 	 * Our queuing method is a bit tricky.  The card
    718 	 * knows in advance which HSCB (by address) to download,
    719 	 * and we can't disappoint it.  To achieve this, the next
    720 	 * HSCB to download is saved off in ahd->next_queued_hscb.
    721 	 * When we are called to queue "an arbitrary scb",
    722 	 * we copy the contents of the incoming HSCB to the one
    723 	 * the sequencer knows about, swap HSCB pointers and
    724 	 * finally assign the SCB to the tag indexed location
    725 	 * in the scb_array.  This makes sure that we can still
    726 	 * locate the correct SCB by SCB_TAG.
    727 	 */
    728 	q_hscb = ahd->next_queued_hscb;
    729 	q_hscb_map = ahd->next_queued_hscb_map;
    730 	saved_hscb_busaddr = q_hscb->hscb_busaddr;
    731 	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
    732 	q_hscb->hscb_busaddr = saved_hscb_busaddr;
    733 	q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
    734 
    735 	/* Now swap HSCB pointers. */
    736 	ahd->next_queued_hscb = scb->hscb;
    737 	ahd->next_queued_hscb_map = scb->hscb_map;
    738 	scb->hscb = q_hscb;
    739 	scb->hscb_map = q_hscb_map;
    740 
    741 	KASSERT((vaddr_t)scb->hscb >= (vaddr_t)scb->hscb_map->vaddr &&
    742 		(vaddr_t)scb->hscb < (vaddr_t)scb->hscb_map->vaddr + PAGE_SIZE);
    743 
    744 	/* Now define the mapping from tag to SCB in the scbindex */
    745 	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
    746 }
    747 
    748 /*
    749  * Tell the sequencer about a new transaction to execute.
    750  */
    751 static __inline void
    752 ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
    753 {
    754 	ahd_swap_with_next_hscb(ahd, scb);
    755 
    756 	if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
    757 		panic("Attempt to queue invalid SCB tag %x\n",
    758 		      SCB_GET_TAG(scb));
    759 
    760 	/*
    761 	 * Keep a history of SCBs we've downloaded in the qinfifo.
    762 	 */
    763 	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
    764 	ahd->qinfifonext++;
    765 
    766 	if (scb->sg_count != 0)
    767 		ahd_setup_data_scb(ahd, scb);
    768 	else
    769 		ahd_setup_noxfer_scb(ahd, scb);
    770 	ahd_setup_scb_common(ahd, scb);
    771 
    772 	/*
    773 	 * Make sure our data is consistent from the
    774 	 * perspective of the adapter.
    775 	 */
    776 	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    777 
    778 #ifdef AHD_DEBUG
    779 	if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
    780 		printf("%s: Queueing SCB 0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
    781 		       ahd_name(ahd),
    782 		       SCB_GET_TAG(scb), scb->hscb->hscb_busaddr,
    783 		       (u_int)((scb->hscb->dataptr >> 32) & 0xFFFFFFFF),
    784 		       (u_int)(scb->hscb->dataptr & 0xFFFFFFFF),
    785 		       scb->hscb->datacnt);
    786 	}
    787 #endif
    788 	/* Tell the adapter about the newly queued SCB */
    789 	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
    790 }
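
         /*
          * Illustrative usage only: once an SCB has been filled in (CDB,
          * scsiid, lun, SG list mapped and counted), submission is simply
          * (sketch):
          *
          *	ahd_queue_scb(ahd, scb);
          *
          * which swaps hardware SCBs, records the tag in the qinfifo,
          * syncs the DMA memory and finally writes HNSCB_QOFF to notify
          * the sequencer.
          */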
    791 
    792 static __inline uint8_t *
    793 ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
    794 {
    795 	return (scb->sense_data);
    796 }
    797 
    798 static __inline uint32_t
    799 ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
    800 {
    801 	return (scb->sense_busaddr);
    802 }
    803 
    804 /************************** Interrupt Processing ******************************/
    805 static __inline void	ahd_sync_qoutfifo(struct ahd_softc *, int);
    806 static __inline void	ahd_sync_tqinfifo(struct ahd_softc *, int);
    807 static __inline u_int	ahd_check_cmdcmpltqueues(struct ahd_softc *);
    808 static __inline int	ahd_intr(void *);
    809 static __inline void	ahd_minphys(struct buf *);
    810 
    811 static __inline void
    812 ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
    813 {
    814 	ahd_dmamap_sync(ahd, ahd->parent_dmat, ahd->shared_data_map.dmamap,
    815 			/*offset*/0, /*len*/AHD_SCB_MAX * sizeof(uint16_t), op);
    816 }
    817 
    818 static __inline void
    819 ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
    820 {
    821 #ifdef AHD_TARGET_MODE
    822 	if ((ahd->flags & AHD_TARGETROLE) != 0) {
    823 		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
    824 				ahd->shared_data_map.dmamap,
    825 				ahd_targetcmd_offset(ahd, 0),
    826 				sizeof(struct target_cmd) * AHD_TMODE_CMDS,
    827 				op);
    828 	}
    829 #endif
    830 }
    831 
    832 /*
    833  * See if the firmware has posted any completed commands
    834  * into our in-core command complete fifos.
    835  */
    836 #define AHD_RUN_QOUTFIFO 0x1
    837 #define AHD_RUN_TQINFIFO 0x2
    838 static __inline u_int
    839 ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
    840 {
    841 	u_int retval;
    842 
    843 	retval = 0;
    844 	ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/, ahd->shared_data_map.dmamap,
    845 			/*offset*/ahd->qoutfifonext, /*len*/2,
    846 			BUS_DMASYNC_POSTREAD);
    847 	if ((ahd->qoutfifo[ahd->qoutfifonext]
    848 	     & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag)
    849 		retval |= AHD_RUN_QOUTFIFO;
    850 #ifdef AHD_TARGET_MODE
    851 	if ((ahd->flags & AHD_TARGETROLE) != 0
    852 	 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
    853 		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
    854 				ahd->shared_data_map.dmamap,
     855 				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
    856 				/*len*/sizeof(struct target_cmd),
    857 				BUS_DMASYNC_POSTREAD);
    858 		if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
    859 			retval |= AHD_RUN_TQINFIFO;
    860 	}
    861 #endif
    862 	return (retval);
    863 }
    864 
    865 /*
    866  * Catch an interrupt from the adapter
    867  */
    868 static __inline int
    869 ahd_intr(void *arg)
    870 {
    871 	struct ahd_softc *ahd = (struct ahd_softc*)arg;
    872 	u_int	intstat;
    873 
    874 	if ((ahd->pause & INTEN) == 0) {
    875 		/*
    876 		 * Our interrupt is not enabled on the chip
    877 		 * and may be disabled for re-entrancy reasons,
    878 		 * so just return.  This is likely just a shared
    879 		 * interrupt.
    880 		 */
    881 		return 0;
    882 	}
    883 
    884 	/*
    885 	 * Instead of directly reading the interrupt status register,
    886 	 * infer the cause of the interrupt by checking our in-core
    887 	 * completion queues.  This avoids a costly PCI bus read in
    888 	 * most cases.
    889 	 */
    890 	if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
    891 	    && (ahd_check_cmdcmpltqueues(ahd) != 0))
    892 		intstat = CMDCMPLT;
    893 	else
    894 		intstat = ahd_inb(ahd, INTSTAT);
    895 
    896 	if (intstat & CMDCMPLT) {
    897 		ahd_outb(ahd, CLRINT, CLRCMDINT);
    898 
    899 		/*
    900 		 * Ensure that the chip sees that we've cleared
    901 		 * this interrupt before we walk the output fifo.
    902 		 * Otherwise, we may, due to posted bus writes,
    903 		 * clear the interrupt after we finish the scan,
    904 		 * and after the sequencer has added new entries
    905 		 * and asserted the interrupt again.
    906 		 */
    907 		if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
    908 			if (ahd_is_paused(ahd)) {
    909 				/*
    910 				 * Potentially lost SEQINT.
    911 				 * If SEQINTCODE is non-zero,
    912 				 * simulate the SEQINT.
    913 				 */
    914 				if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
    915 					intstat |= SEQINT;
    916 			}
    917 		} else {
    918 			ahd_flush_device_writes(ahd);
    919 		}
    920 		scsipi_channel_freeze(&ahd->sc_channel, 1);
    921 		ahd_run_qoutfifo(ahd);
    922 		scsipi_channel_thaw(&ahd->sc_channel, 1);
    923 		ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
    924 		ahd->cmdcmplt_total++;
    925 #ifdef AHD_TARGET_MODE
    926 		if ((ahd->flags & AHD_TARGETROLE) != 0)
    927 			ahd_run_tqinfifo(ahd, /*paused*/FALSE);
    928 #endif
    929 		if (intstat == CMDCMPLT)
    930 			return 1;
    931 	}
    932 
    933 	if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0)
    934 		/* Hot eject */
    935 		return 1;
    936 
    937 	if ((intstat & INT_PEND) == 0)
    938 		return 1;
    939 
    940 	if (intstat & HWERRINT) {
    941 		ahd_handle_hwerrint(ahd);
    942 		return 1;
    943 	}
    944 
    945 	if ((intstat & (PCIINT|SPLTINT)) != 0) {
    946 		ahd->bus_intr(ahd);
    947 		return 1;
    948 	}
    949 
    950 	if ((intstat & (SEQINT)) != 0) {
    951 		ahd_handle_seqint(ahd, intstat);
    952 		return 1;
    953 	}
    954 
    955 	if ((intstat & SCSIINT) != 0) {
    956 		ahd_handle_scsiint(ahd, intstat);
    957 		return 1;
    958 	}
    959 
    960 	return 1;
    961 }
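
         /*
          * Illustrative note: ahd_intr() returns 0 when the chip has its
          * interrupt disabled (most likely a shared interrupt intended for
          * another device) and non-zero otherwise, so a bus front end can
          * register it directly, e.g. on NetBSD/pci (sketch; "intrh" and
          * the attachment variables are assumptions):
          *
          *	intrh = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
          *				   ahd_intr, ahd);
          */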
    962 
    963 static __inline void
     964 ahd_minphys(struct buf *bp)
     965 {
    967 /*
     968  * Even though the card can transfer up to 16 MB per command,
    969  * we are limited by the number of segments in the DMA segment
    970  * list that we can hold.  The worst case is that all pages are
     971  * physically discontinuous, hence the "page per segment" limit
    972  * enforced here.
    973  */
    974         if (bp->b_bcount > AHD_MAXTRANSFER_SIZE) {
    975                 bp->b_bcount = AHD_MAXTRANSFER_SIZE;
    976         }
    977         minphys(bp);
    978 }
    979 
    980 static __inline u_int32_t scsi_4btoul(u_int8_t *);
    981 
    982 static __inline u_int32_t
    983 scsi_4btoul(u_int8_t *bytes)
    984 {
    985         u_int32_t rv;
    986 
    987         rv = (bytes[0] << 24) |
    988              (bytes[1] << 16) |
    989              (bytes[2] << 8) |
    990              bytes[3];
    991         return (rv);
    992 }
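
         /*
          * Illustrative usage only: scsi_4btoul() converts a 4-byte
          * big-endian field, as found in CDBs and sense data, to host
          * order, e.g. the logical block address of a 10-byte CDB
          * (sketch; "cdb" is a hypothetical uint8_t array):
          *
          *	uint32_t lba = scsi_4btoul(&cdb[2]);
          */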
    993 
    994 
    995 #endif  /* _AIC79XX_INLINE_H_ */
    996