/*	$NetBSD: aic79xx_inline.h,v 1.24 2021/08/13 20:47:55 andvar Exp $	*/

/*
 * Inline routines shareable across OS platforms.
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2000-2003 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#51 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_inline.h,v 1.12 2003/06/28 04:43:19 gibbs Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
 * - April 2003
 */

#ifndef _AIC79XX_INLINE_H_
#define _AIC79XX_INLINE_H_

/******************************** Debugging ***********************************/
static __inline const char *ahd_name(struct ahd_softc *);

static __inline const char *
ahd_name(struct ahd_softc *ahd)
{
	return (ahd->name);
}

/************************ Sequencer Execution Control *************************/
static __inline void	ahd_known_modes(struct ahd_softc *, ahd_mode, ahd_mode);
static __inline ahd_mode_state
			ahd_build_mode_state(struct ahd_softc *,
					     ahd_mode, ahd_mode);
static __inline void	ahd_extract_mode_state(struct ahd_softc *,
					       ahd_mode_state,
					       ahd_mode *, ahd_mode *);
static __inline void	ahd_set_modes(struct ahd_softc *, ahd_mode, ahd_mode);
static __inline void	ahd_update_modes(struct ahd_softc *);
static __inline void	ahd_assert_modes(struct ahd_softc *, ahd_mode,
					 ahd_mode, const char *, int);
static __inline ahd_mode_state
			ahd_save_modes(struct ahd_softc *);
static __inline void	ahd_restore_modes(struct ahd_softc *, ahd_mode_state);
static __inline int	ahd_is_paused(struct ahd_softc *);
static __inline void	ahd_pause(struct ahd_softc *);
static __inline void	ahd_unpause(struct ahd_softc *);

static __inline void
ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	ahd->src_mode = src;
	ahd->dst_mode = dst;
	ahd->saved_src_mode = src;
	ahd->saved_dst_mode = dst;
}

static __inline ahd_mode_state
ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT));
}

static __inline void
ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
		       ahd_mode *src, ahd_mode *dst)
{
	*src = (state & SRC_MODE) >> SRC_MODE_SHIFT;
	*dst = (state & DST_MODE) >> DST_MODE_SHIFT;
}

static __inline void
ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	if (ahd->src_mode == src && ahd->dst_mode == dst)
		return;
#ifdef AHD_DEBUG
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		panic("Setting mode prior to saving it.\n");
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
		       ahd_build_mode_state(ahd, src, dst));
#endif
	ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
	ahd->src_mode = src;
	ahd->dst_mode = dst;
}
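/*
 * Editor's note (illustrative, not part of the original source): because
 * ahd_set_modes() trusts the cached src_mode/dst_mode and skips the
 * MODE_PTR write when they already match, callers that cannot be sure
 * the cache is current (for example, right after pausing a running
 * sequencer that may have changed MODE_PTR itself) refresh it first:
 *
 *	ahd_update_modes(ahd);		// re-read MODE_PTR into the cache
 *	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
 */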
static __inline void
ahd_update_modes(struct ahd_softc *ahd)
{
	ahd_mode_state mode_ptr;
	ahd_mode src;
	ahd_mode dst;

	mode_ptr = ahd_inb(ahd, MODE_PTR);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printf("Reading mode 0x%x\n", mode_ptr);
#endif
	ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
	ahd_known_modes(ahd, src, dst);
}

static __inline void
ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
		 ahd_mode dstmode, const char *file, int line)
{
#ifdef AHD_DEBUG
	if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
	 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
		panic("%s:%s:%d: Mode assertion failed.\n",
		      ahd_name(ahd), file, line);
	}
#endif
}

static __inline ahd_mode_state
ahd_save_modes(struct ahd_softc *ahd)
{
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		ahd_update_modes(ahd);

	return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
}

static __inline void
ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
{
	ahd_mode src;
	ahd_mode dst;

	ahd_extract_mode_state(ahd, state, &src, &dst);
	ahd_set_modes(ahd, src, dst);
}

#define AHD_ASSERT_MODES(ahd, source, dest) \
	ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
static __inline int
ahd_is_paused(struct ahd_softc *ahd)
{
	return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
static __inline void
ahd_pause(struct ahd_softc *ahd)
{
	ahd_outb(ahd, HCNTRL, ahd->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahd_is_paused(ahd) == 0)
		;
}
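/*
 * Editor's sketch (not part of the original source): the pause and mode
 * routines above are typically used together to bracket direct register
 * access while the sequencer is otherwise running, roughly:
 *
 *	ahd_mode_state saved_modes;
 *
 *	ahd_pause(ahd);
 *	saved_modes = ahd_save_modes(ahd);
 *	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
 *	... touch CCHAN-mode registers ...
 *	ahd_restore_modes(ahd, saved_modes);
 *	ahd_unpause(ahd);
 */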
/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
static __inline void
ahd_unpause(struct ahd_softc *ahd)
{
	/*
	 * Automatically restore our modes to those saved
	 * prior to the first change of the mode.
	 */
	if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
	 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
		if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
			ahd_reset_cmds_pending(ahd);
		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
	}

	if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
		ahd_outb(ahd, HCNTRL, ahd->unpause);

	ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
}

/*********************** Scatter Gather List Handling *************************/
static __inline void	*ahd_sg_setup(struct ahd_softc *, struct scb *,
				      void *, bus_addr_t, bus_size_t, int);
static __inline void	ahd_setup_scb_common(struct ahd_softc *, struct scb *);
static __inline void	ahd_setup_data_scb(struct ahd_softc *, struct scb *);
static __inline void	ahd_setup_noxfer_scb(struct ahd_softc *, struct scb *);

static __inline void *
ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
	     void *sgptr, bus_addr_t addr, bus_size_t len, int last)
{
	scb->sg_count++;
	if (sizeof(bus_addr_t) > 4
	 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)sgptr;
		sg->addr = ahd_htole64(addr);
		sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	} else {
		struct ahd_dma_seg *sg;

		sg = (struct ahd_dma_seg *)sgptr;
		sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
		sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
				    | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	}
}
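/*
 * Editor's worked example (illustrative only, not from the original
 * source): with the 32-bit descriptor format above, address bits 38:32
 * of a 39-bit segment are folded into bits 30:24 of the length word.
 * A hypothetical segment at bus address 0x1234567800, 0x1000 bytes
 * long, with more segments to follow (last == 0) would be encoded as
 *
 *	sg->addr = ahd_htole32(0x34567800);
 *	sg->len  = ahd_htole32(0x12001000);
 */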
static __inline void
ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
{
	/* XXX Handle target mode SCBs. */
	scb->crc_retry_count = 0;
	if ((scb->flags & SCB_PACKETIZED) != 0) {
		/* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
		scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
	} else {
		if (ahd_get_transfer_length(scb) & 0x01)
			scb->hscb->task_attribute = SCB_XFERLEN_ODD;
		else
			scb->hscb->task_attribute = 0;
	}

	if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
	 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
		scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
		    ahd_htole32(scb->sense_busaddr);
}

static __inline void
ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
{
	/*
	 * Copy the first SG into the "current" data pointer area.
	 */
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)scb->sg_list;
		scb->hscb->dataptr = sg->addr;
		scb->hscb->datacnt = sg->len;
	} else {
		struct ahd_dma_seg *sg;
		uint32_t *dataptr_words;

		sg = (struct ahd_dma_seg *)scb->sg_list;
		dataptr_words = (uint32_t*)&scb->hscb->dataptr;
		dataptr_words[0] = sg->addr;
		dataptr_words[1] = 0;
		if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
			uint64_t high_addr;

			high_addr = ahd_le32toh(sg->len) & 0x7F000000;
			scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
		}
		scb->hscb->datacnt = sg->len;
	}
	/*
	 * Note where to find the SG entries in bus space.
	 * We also set the full residual flag which the
	 * sequencer will clear as soon as a data transfer
	 * occurs.
	 */
	scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
}

static __inline void
ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
{
	scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
	scb->hscb->dataptr = 0;
	scb->hscb->datacnt = 0;
}

/************************** Memory mapping routines ***************************/
static __inline size_t	ahd_sg_size(struct ahd_softc *);
static __inline void *
			ahd_sg_bus_to_virt(struct ahd_softc *, struct scb *,
					   uint32_t);
static __inline uint32_t
			ahd_sg_virt_to_bus(struct ahd_softc *, struct scb *,
					   void *);
static __inline void	ahd_sync_scb(struct ahd_softc *, struct scb *, int);
static __inline void	ahd_sync_sglist(struct ahd_softc *, struct scb *, int);
static __inline void	ahd_sync_sense(struct ahd_softc *, struct scb *, int);
static __inline uint32_t
			ahd_targetcmd_offset(struct ahd_softc *, u_int);

static __inline size_t
ahd_sg_size(struct ahd_softc *ahd)
{
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
		return (sizeof(struct ahd_dma64_seg));
	return (sizeof(struct ahd_dma_seg));
}

static __inline void *
ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
{
	bus_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
	return ((uint8_t *)scb->sg_list + sg_offset);
}

static __inline uint32_t
ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
{
	bus_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
	    - ahd_sg_size(ahd);

	return (scb->sg_list_busaddr + sg_offset);
}
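/*
 * Editor's note (illustrative, not from the original source): because
 * sg_list_busaddr corresponds to entry 1 of the list, the two helpers
 * above are inverses that each apply a one-entry bias, e.g.
 *
 *	ahd_sg_bus_to_virt(ahd, scb, scb->sg_list_busaddr)
 *		== (uint8_t *)scb->sg_list + ahd_sg_size(ahd)
 *	ahd_sg_virt_to_bus(ahd, scb, scb->sg_list)
 *		== scb->sg_list_busaddr - ahd_sg_size(ahd)
 */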
static __inline void
ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->hscb_map->dmamap,
			/*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
			/*len*/sizeof(*scb->hscb), op);
}

static __inline void
ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->sg_map->dmamap,
			/*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
			/*len*/ahd_sg_size(ahd) * scb->sg_count, op);
}

static __inline void
ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat,
			scb->sense_map->dmamap,
			/*offset*/scb->sense_busaddr - scb->sense_map->physaddr,
			/*len*/AHD_SENSE_BUFSIZE, op);
}

static __inline uint32_t
ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
{
	return (((uint8_t *)&ahd->targetcmds[index])
		- (uint8_t *)ahd->qoutfifo);
}

/*********************** Miscellaneous Support Functions ***********************/
static __inline void	ahd_complete_scb(struct ahd_softc *, struct scb *);
static __inline void	ahd_update_residual(struct ahd_softc *, struct scb *);
static __inline struct ahd_initiator_tinfo *
			ahd_fetch_transinfo(struct ahd_softc *, char, u_int,
					    u_int, struct ahd_tmode_tstate **);
static __inline uint16_t
			ahd_inw(struct ahd_softc *, u_int);
static __inline void	ahd_outw(struct ahd_softc *, u_int, u_int);
static __inline uint32_t
			ahd_inl(struct ahd_softc *, u_int);
static __inline void	ahd_outl(struct ahd_softc *, u_int, uint32_t);
static __inline uint64_t
			ahd_inq(struct ahd_softc *, u_int);
static __inline void	ahd_outq(struct ahd_softc *, u_int, uint64_t);
static __inline u_int	ahd_get_scbptr(struct ahd_softc *);
static __inline void	ahd_set_scbptr(struct ahd_softc *, u_int);
static __inline u_int	ahd_get_hnscb_qoff(struct ahd_softc *);
static __inline void	ahd_set_hnscb_qoff(struct ahd_softc *, u_int);
static __inline u_int	ahd_get_hescb_qoff(struct ahd_softc *);
static __inline void	ahd_set_hescb_qoff(struct ahd_softc *, u_int);
static __inline u_int	ahd_get_snscb_qoff(struct ahd_softc *);
static __inline void	ahd_set_snscb_qoff(struct ahd_softc *, u_int);
static __inline u_int	ahd_get_sescb_qoff(struct ahd_softc *);
static __inline void	ahd_set_sescb_qoff(struct ahd_softc *, u_int);
static __inline u_int	ahd_get_sdscb_qoff(struct ahd_softc *);
static __inline void	ahd_set_sdscb_qoff(struct ahd_softc *, u_int);
static __inline u_int	ahd_inb_scbram(struct ahd_softc *, u_int);
static __inline u_int	ahd_inw_scbram(struct ahd_softc *, u_int);
static __inline uint32_t
			ahd_inl_scbram(struct ahd_softc *, u_int);
static __inline uint64_t
			ahd_inq_scbram(struct ahd_softc *ahd, u_int offset);
static __inline void	ahd_swap_with_next_hscb(struct ahd_softc *,
						struct scb *);
static __inline void	ahd_queue_scb(struct ahd_softc *, struct scb *);
static __inline uint8_t *
			ahd_get_sense_buf(struct ahd_softc *, struct scb *);
static __inline uint32_t
			ahd_get_sense_bufaddr(struct ahd_softc *, struct scb *);
static __inline void	ahd_post_scb(struct ahd_softc *, struct scb *);


static __inline void
ahd_post_scb(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_handle_scb_status(ahd, scb);
	else
		ahd_done(ahd, scb);
}

static __inline void
ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_handle_scb_status(ahd, scb);
	else
		ahd_done(ahd, scb);
}

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static __inline void
ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_calc_residual(ahd, scb);
}
/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
static __inline struct ahd_initiator_tinfo *
ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
		    u_int remote_id, struct ahd_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahd->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}

#define AHD_COPY_COL_IDX(dst, src)				\
do {								\
	dst->hscb->scsiid = src->hscb->scsiid;			\
	dst->hscb->lun = src->hscb->lun;			\
} while (0)

static __inline uint16_t
ahd_inw(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port));
}

static __inline void
ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
}

static __inline uint32_t
ahd_inl(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (ahd_inb(ahd, port+3) << 24));
}

static __inline void
ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
{
	ahd_outb(ahd, port, (value) & 0xFF);
	ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
	ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
	ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
}

static __inline uint64_t
ahd_inq(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (((uint64_t)ahd_inb(ahd, port+3)) << 24)
	      | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
	      | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
	      | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
	      | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
}

static __inline void
ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
	ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
	ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
	ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
	ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
	ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
	ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
}

static __inline u_int
ahd_get_scbptr(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
}

static __inline void
ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
	ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
}
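/*
 * Editor's note (illustrative only): the multi-byte accessors above
 * compose wider values from consecutive byte-wide ports in
 * little-endian lane order, so for a hypothetical 32-bit register at
 * "port", ahd_outl(ahd, port, 0x12345678) performs
 *
 *	ahd_outb(ahd, port+0, 0x78);
 *	ahd_outb(ahd, port+1, 0x56);
 *	ahd_outb(ahd, port+2, 0x34);
 *	ahd_outb(ahd, port+3, 0x12);
 */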
static __inline u_int
ahd_get_hnscb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inw_atomic(ahd, HNSCB_QOFF));
}

static __inline void
ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outw_atomic(ahd, HNSCB_QOFF, value);
}

static __inline u_int
ahd_get_hescb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inb(ahd, HESCB_QOFF));
}

static __inline void
ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outb(ahd, HESCB_QOFF, value);
}

static __inline u_int
ahd_get_snscb_qoff(struct ahd_softc *ahd)
{
	u_int oldvalue;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	oldvalue = ahd_inw(ahd, SNSCB_QOFF);
	ahd_outw(ahd, SNSCB_QOFF, oldvalue);
	return (oldvalue);
}

static __inline void
ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outw(ahd, SNSCB_QOFF, value);
}

static __inline u_int
ahd_get_sescb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SESCB_QOFF));
}

static __inline void
ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SESCB_QOFF, value);
}

static __inline u_int
ahd_get_sdscb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
}

static __inline void
ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
	ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
}
static __inline u_int
ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
{
	u_int value;

	/*
	 * Workaround PCI-X Rev A. hardware bug.
	 * After a host read of SCB memory, the chip
	 * may become confused into thinking prefetch
	 * was required.  This starts the discard timer
	 * running and can cause an unexpected discard
	 * timer interrupt.  The workaround is to read
	 * a normal register prior to the exhaustion of
	 * the discard timer.  The mode pointer register
	 * has no side effects and so serves well for
	 * this purpose.
	 *
	 * Razor #528
	 */
	value = ahd_inb(ahd, offset);
	if ((ahd->flags & AHD_PCIX_SCBRAM_RD_BUG) != 0)
		ahd_inb(ahd, MODE_PTR);
	return (value);
}

static __inline u_int
ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inb_scbram(ahd, offset)
	      | (ahd_inb_scbram(ahd, offset+1) << 8));
}

static __inline uint32_t
ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inw_scbram(ahd, offset)
	      | (ahd_inw_scbram(ahd, offset+2) << 16));
}

static __inline uint64_t
ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inl_scbram(ahd, offset)
	      | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
}

static __inline struct scb *
ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
{
	struct scb* scb;

	if (tag >= AHD_SCB_MAX)
		return (NULL);
	scb = ahd->scb_data.scbindex[tag];
	if (scb != NULL)
		ahd_sync_scb(ahd, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	return (scb);
}

static __inline void
ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	struct map_node *q_hscb_map;
	uint32_t saved_hscb_busaddr;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB (by address) to download,
	 * and we can't disappoint it.  To achieve this, the next
	 * HSCB to download is saved off in ahd->next_queued_hscb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahd->next_queued_hscb;
	q_hscb_map = ahd->next_queued_hscb_map;
	saved_hscb_busaddr = q_hscb->hscb_busaddr;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	q_hscb->hscb_busaddr = saved_hscb_busaddr;
	q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;

	/* Now swap HSCB pointers. */
	ahd->next_queued_hscb = scb->hscb;
	ahd->next_queued_hscb_map = scb->hscb_map;
	scb->hscb = q_hscb;
	scb->hscb_map = q_hscb_map;

	KASSERT((vaddr_t)scb->hscb >= (vaddr_t)scb->hscb_map->vaddr &&
	    (vaddr_t)scb->hscb < (vaddr_t)scb->hscb_map->vaddr + PAGE_SIZE);

	/* Now define the mapping from tag to SCB in the scbindex */
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
}
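/*
 * Editor's sketch (not from the original source): concretely, if the
 * sequencer has been promised HSCB "A" (ahd->next_queued_hscb) and we
 * queue an SCB currently backed by HSCB "B", the swap above copies B's
 * contents into A, points the SCB at A, and keeps the now-free B as
 * the next_queued_hscb for the following ahd_queue_scb() call, so the
 * card always finds the HSCB address it expects.
 */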
/*
 * Tell the sequencer about a new transaction to execute.
 */
static __inline void
ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
{
	ahd_swap_with_next_hscb(ahd, scb);

	if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
		panic("Attempt to queue invalid SCB tag %x\n",
		      SCB_GET_TAG(scb));

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;

	if (scb->sg_count != 0)
		ahd_setup_data_scb(ahd, scb);
	else
		ahd_setup_noxfer_scb(ahd, scb);
	ahd_setup_scb_common(ahd, scb);

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
		uint64_t host_dataptr;

		host_dataptr = ahd_le64toh(scb->hscb->dataptr);
		printf("%s: Queueing SCB 0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
		       ahd_name(ahd),
		       SCB_GET_TAG(scb), ahd_le32toh(scb->hscb->hscb_busaddr),
		       (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
		       (u_int)(host_dataptr & 0xFFFFFFFF),
		       ahd_le32toh(scb->hscb->datacnt));
	}
#endif
	/* Tell the adapter about the newly queued SCB */
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
}

static __inline uint8_t *
ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
{
	return (scb->sense_data);
}

static __inline uint32_t
ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
{
	return (scb->sense_busaddr);
}

/************************** Interrupt Processing ******************************/
static __inline void	ahd_sync_qoutfifo(struct ahd_softc *, int);
static __inline void	ahd_sync_tqinfifo(struct ahd_softc *, int);
static __inline u_int	ahd_check_cmdcmpltqueues(struct ahd_softc *);
static __inline int	ahd_intr(void *);
static __inline void	ahd_minphys(struct buf *);

static __inline void
ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat, ahd->shared_data_map.dmamap,
			/*offset*/0, /*len*/AHD_SCB_MAX * sizeof(uint16_t), op);
}

static __inline void
ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
{
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, 0),
				sizeof(struct target_cmd) * AHD_TMODE_CMDS,
				op);
	}
#endif
}
/*
 * See if the firmware has posted any completed commands
 * into our in-core command complete fifos.
 */
#define AHD_RUN_QOUTFIFO 0x1
#define AHD_RUN_TQINFIFO 0x2
static __inline u_int
ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
{
	u_int retval;

	retval = 0;
	ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
			ahd->shared_data_map.dmamap,
			/*offset*/ahd->qoutfifonext, /*len*/2,
			BUS_DMASYNC_POSTREAD);
	if ((ahd->qoutfifo[ahd->qoutfifonext]
	     & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag)
		retval |= AHD_RUN_QOUTFIFO;
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0
	 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
			retval |= AHD_RUN_TQINFIFO;
	}
#endif
	return (retval);
}
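/*
 * Editor's note (hedged, based on a reading of the companion aic79xx.c
 * code rather than this header): the QOUTFIFO_ENTRY_VALID_LE comparison
 * above works because the tag kept in qoutfifonext_valid_tag is toggled
 * each time the completion ring wraps, so entries left over from the
 * previous pass no longer match and are not mistaken for new
 * completions.
 */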
/*
 * Catch an interrupt from the adapter
 */
static __inline int
ahd_intr(void *arg)
{
	struct ahd_softc *ahd = arg;
	u_int	intstat;

	if ((ahd->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}

	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
	 && (ahd_check_cmdcmpltqueues(ahd) != 0))
		intstat = CMDCMPLT;
	else
		intstat = ahd_inb(ahd, INTSTAT);

	if ((intstat & INT_PEND) == 0)
		return (0);

	if (intstat & CMDCMPLT) {
		ahd_outb(ahd, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
			if (ahd_is_paused(ahd)) {
				/*
				 * Potentially lost SEQINT.
				 * If SEQINTCODE is non-zero,
				 * simulate the SEQINT.
				 */
				if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
					intstat |= SEQINT;
			}
		} else {
			ahd_flush_device_writes(ahd);
		}
		scsipi_channel_freeze(&ahd->sc_channel, 1);
		ahd_run_qoutfifo(ahd);
		scsipi_channel_thaw(&ahd->sc_channel, 1);
		ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
		ahd->cmdcmplt_total++;
#ifdef AHD_TARGET_MODE
		if ((ahd->flags & AHD_TARGETROLE) != 0)
			ahd_run_tqinfifo(ahd, /*paused*/FALSE);
#endif
		if (intstat == CMDCMPLT)
			return 1;
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & HWERRINT) {
		ahd_handle_hwerrint(ahd);
	} else if ((intstat & (PCIINT|SPLTINT)) != 0) {
		ahd->bus_intr(ahd);
	} else {

		if ((intstat & SEQINT) != 0)
			ahd_handle_seqint(ahd, intstat);

		if ((intstat & SCSIINT) != 0)
			ahd_handle_scsiint(ahd, intstat);
	}

	return (1);
}

static __inline void
ahd_minphys(struct buf *bp)
{
	/*
	 * Even though the card can transfer up to 16megs per command
	 * we are limited by the number of segments in the DMA segment
	 * list that we can hold.  The worst case is that all pages are
	 * discontinuous physically, hence the "page per segment" limit
	 * enforced here.
	 */
	if (bp->b_bcount > AHD_MAXTRANSFER_SIZE) {
		bp->b_bcount = AHD_MAXTRANSFER_SIZE;
	}
	minphys(bp);
}

static __inline u_int32_t scsi_4btoul(u_int8_t *);

static __inline u_int32_t
scsi_4btoul(u_int8_t *bytes)
{
	u_int32_t rv;

	rv = (bytes[0] << 24) |
	     (bytes[1] << 16) |
	     (bytes[2] << 8) |
	     bytes[3];
	return (rv);
}


#endif /* _AIC79XX_INLINE_H_ */