/*	$NetBSD: aic79xx_inline.h,v 1.7 2003/08/29 01:28:52 thorpej Exp $	*/

/*
 * Inline routines shareable across OS platforms.
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2000-2003 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#50 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_inline.h,v 1.11 2003/05/26 21:26:52 gibbs Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#ifndef _AIC79XX_INLINE_H_
#define _AIC79XX_INLINE_H_

/******************************** Debugging ***********************************/
static __inline char *ahd_name(struct ahd_softc *);

static __inline char *
ahd_name(struct ahd_softc *ahd)
{
	return (ahd->name);
}

/************************ Sequencer Execution Control *************************/
static __inline void ahd_known_modes(struct ahd_softc *, ahd_mode, ahd_mode);
static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *,
		ahd_mode, ahd_mode);
static __inline void ahd_extract_mode_state(struct ahd_softc *,
		ahd_mode_state, ahd_mode *, ahd_mode *);
static __inline void ahd_set_modes(struct ahd_softc *, ahd_mode, ahd_mode);
static __inline void ahd_update_modes(struct ahd_softc *);
static __inline void ahd_assert_modes(struct ahd_softc *, ahd_mode,
		ahd_mode, const char *, int);
static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *);
static __inline void ahd_restore_modes(struct ahd_softc *, ahd_mode_state);
static __inline int ahd_is_paused(struct ahd_softc *);
static __inline void ahd_pause(struct ahd_softc *);
static __inline void ahd_unpause(struct ahd_softc *);

static __inline void
ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	ahd->src_mode = src;
	ahd->dst_mode = dst;
	ahd->saved_src_mode = src;
	ahd->saved_dst_mode = dst;
}

static __inline ahd_mode_state
ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT));
}

static __inline void
ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
		       ahd_mode *src, ahd_mode *dst)
{
	*src = (state & SRC_MODE) >> SRC_MODE_SHIFT;
	*dst = (state & DST_MODE) >> DST_MODE_SHIFT;
}

static __inline void
ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	if (ahd->src_mode == src && ahd->dst_mode == dst)
		return;
#ifdef AHD_DEBUG
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		panic("Setting mode prior to saving it.\n");
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
		       ahd_build_mode_state(ahd, src, dst));
#endif
	ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
	ahd->src_mode = src;
	ahd->dst_mode = dst;
}

static __inline void
ahd_update_modes(struct ahd_softc *ahd)
{
	ahd_mode_state mode_ptr;
	ahd_mode src;
	ahd_mode dst;

	mode_ptr = ahd_inb(ahd, MODE_PTR);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printf("Reading mode 0x%x\n", mode_ptr);
#endif
	ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
	ahd_known_modes(ahd, src, dst);
}

static __inline void
ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
		 ahd_mode dstmode, const char *file, int line)
{
#ifdef AHD_DEBUG
	if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
	 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
		panic("%s:%s:%d: Mode assertion failed.\n",
		      ahd_name(ahd), file, line);
	}
#endif
}

static __inline ahd_mode_state
ahd_save_modes(struct ahd_softc *ahd)
{
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		ahd_update_modes(ahd);

	return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
}

static __inline void
ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
{
	ahd_mode src;
	ahd_mode dst;

	ahd_extract_mode_state(ahd, state, &src, &dst);
	ahd_set_modes(ahd, src, dst);
}
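
/*
 * Typical caller pattern (an illustrative sketch only; the modes a real
 * caller selects depend on the registers it needs to touch):
 *
 *	ahd_mode_state saved_modes;
 *
 *	saved_modes = ahd_save_modes(ahd);
 *	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
 *	... access mode-dependent registers ...
 *	ahd_restore_modes(ahd, saved_modes);
 */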

#define AHD_ASSERT_MODES(ahd, source, dest) \
	ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
static __inline int
ahd_is_paused(struct ahd_softc *ahd)
{
	return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
static __inline void
ahd_pause(struct ahd_softc *ahd)
{
	ahd_outb(ahd, HCNTRL, ahd->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahd_is_paused(ahd) == 0)
		;
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
static __inline void
ahd_unpause(struct ahd_softc *ahd)
{
	/*
	 * Automatically restore our modes to those saved
	 * prior to the first change of the mode.
	 */
	if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
	 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
		if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
			ahd_reset_cmds_pending(ahd);
		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
	}

	if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
		ahd_outb(ahd, HCNTRL, ahd->unpause);

	ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
}

/*********************** Scatter Gather List Handling *************************/
static __inline void *ahd_sg_setup(struct ahd_softc *, struct scb *,
		void *, bus_addr_t, bus_size_t, int);
static __inline void ahd_setup_scb_common(struct ahd_softc *, struct scb *);
static __inline void ahd_setup_data_scb(struct ahd_softc *, struct scb *);
static __inline void ahd_setup_noxfer_scb(struct ahd_softc *, struct scb *);

static __inline void *
ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
	     void *sgptr, bus_addr_t addr, bus_size_t len, int last)
{
	scb->sg_count++;
	if (sizeof(bus_addr_t) > 4
	 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)sgptr;
		sg->addr = ahd_htole64(addr);
		sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	} else {
		struct ahd_dma_seg *sg;

		sg = (struct ahd_dma_seg *)sgptr;
		sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
		sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
				    | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	}
}
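
/*
 * Illustrative use only (a sketch of how an OSM might fill an SCB's SG
 * list from a bus_dma segment array; "dm_segs" and "nseg" are assumed
 * names, not part of this header):
 *
 *	void *sg = scb->sg_list;
 *	int i;
 *
 *	for (i = 0; i < nseg; i++)
 *		sg = ahd_sg_setup(ahd, scb, sg, dm_segs[i].ds_addr,
 *		    dm_segs[i].ds_len, i == nseg - 1);
 */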

static __inline void
ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
{
	/* XXX Handle target mode SCBs. */
	scb->crc_retry_count = 0;
	if ((scb->flags & SCB_PACKETIZED) != 0) {
		/* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
		scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
	} else {
		if (ahd_get_transfer_length(scb) & 0x01)
			scb->hscb->task_attribute = SCB_XFERLEN_ODD;
		else
			scb->hscb->task_attribute = 0;
	}

	if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
	 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
		scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
		    ahd_htole32(scb->sense_busaddr);
}

static __inline void
ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
{
	/*
	 * Copy the first SG into the "current" data pointer area.
	 */
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)scb->sg_list;
		scb->hscb->dataptr = sg->addr;
		scb->hscb->datacnt = sg->len;
	} else {
		struct ahd_dma_seg *sg;
		uint32_t *dataptr_words;

		sg = (struct ahd_dma_seg *)scb->sg_list;
		dataptr_words = (uint32_t*)&scb->hscb->dataptr;
		dataptr_words[0] = sg->addr;
		dataptr_words[1] = 0;
		if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
			uint64_t high_addr;

			high_addr = ahd_le32toh(sg->len) & 0x7F000000;
			scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
		}
		scb->hscb->datacnt = sg->len;
	}
	/*
	 * Note where to find the SG entries in bus space.
	 * We also set the full residual flag which the
	 * sequencer will clear as soon as a data transfer
	 * occurs.
	 */
	scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
}

static __inline void
ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
{
	scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
	scb->hscb->dataptr = 0;
	scb->hscb->datacnt = 0;
}

/************************** Memory mapping routines ***************************/
static __inline size_t ahd_sg_size(struct ahd_softc *);
static __inline void *
		ahd_sg_bus_to_virt(struct ahd_softc *, struct scb *,
				   uint32_t);
static __inline uint32_t
		ahd_sg_virt_to_bus(struct ahd_softc *, struct scb *,
				   void *);
static __inline void ahd_sync_scb(struct ahd_softc *, struct scb *, int);
static __inline void ahd_sync_sglist(struct ahd_softc *, struct scb *, int);
static __inline void ahd_sync_sense(struct ahd_softc *, struct scb *, int);
static __inline uint32_t
		ahd_targetcmd_offset(struct ahd_softc *, u_int);

static __inline size_t
ahd_sg_size(struct ahd_softc *ahd)
{
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
		return (sizeof(struct ahd_dma64_seg));
	return (sizeof(struct ahd_dma_seg));
}

static __inline void *
ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
{
	bus_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
	return ((uint8_t *)scb->sg_list + sg_offset);
}

static __inline uint32_t
ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
{
	bus_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
		  - ahd_sg_size(ahd);

	return (scb->sg_list_busaddr + sg_offset);
}

static __inline void
ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->hscb_map->dmamap,
			/*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
			/*len*/sizeof(*scb->hscb), op);
}

static __inline void
ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->sg_map->dmamap,
			/*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
			/*len*/ahd_sg_size(ahd) * scb->sg_count, op);
}

static __inline void
ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat,
			scb->sense_map->dmamap,
			/*offset*/scb->sense_busaddr,
			/*len*/AHD_SENSE_BUFSIZE, op);
}

static __inline uint32_t
ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
{
	return (((uint8_t *)&ahd->targetcmds[index])
		- (uint8_t *)ahd->qoutfifo);
}

/*********************** Miscellaneous Support Functions **********************/
static __inline void ahd_complete_scb(struct ahd_softc *, struct scb *);
static __inline void ahd_update_residual(struct ahd_softc *, struct scb *);
static __inline struct ahd_initiator_tinfo *
		ahd_fetch_transinfo(struct ahd_softc *, char, u_int,
				    u_int, struct ahd_tmode_tstate **);
static __inline uint16_t
		ahd_inw(struct ahd_softc *, u_int);
static __inline void ahd_outw(struct ahd_softc *, u_int, u_int);
static __inline uint32_t
		ahd_inl(struct ahd_softc *, u_int);
static __inline void ahd_outl(struct ahd_softc *, u_int, uint32_t);
static __inline uint64_t
		ahd_inq(struct ahd_softc *, u_int);
static __inline void ahd_outq(struct ahd_softc *, u_int, uint64_t);
static __inline u_int ahd_get_scbptr(struct ahd_softc *);
static __inline void ahd_set_scbptr(struct ahd_softc *, u_int);
static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *);
static __inline void ahd_set_hnscb_qoff(struct ahd_softc *, u_int);
static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *);
static __inline void ahd_set_hescb_qoff(struct ahd_softc *, u_int);
static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *);
static __inline void ahd_set_snscb_qoff(struct ahd_softc *, u_int);
static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *);
static __inline void ahd_set_sescb_qoff(struct ahd_softc *, u_int);
static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *);
static __inline void ahd_set_sdscb_qoff(struct ahd_softc *, u_int);
static __inline u_int ahd_inb_scbram(struct ahd_softc *, u_int);
static __inline u_int ahd_inw_scbram(struct ahd_softc *, u_int);
static __inline uint32_t
		ahd_inl_scbram(struct ahd_softc *, u_int);
static __inline void ahd_swap_with_next_hscb(struct ahd_softc *,
		struct scb *);
static __inline void ahd_queue_scb(struct ahd_softc *, struct scb *);
static __inline uint8_t *
		ahd_get_sense_buf(struct ahd_softc *, struct scb *);
static __inline uint32_t
		ahd_get_sense_bufaddr(struct ahd_softc *, struct scb *);
static __inline void ahd_post_scb(struct ahd_softc *, struct scb *);


static __inline void
ahd_post_scb(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_handle_scb_status(ahd, scb);
	else
		ahd_done(ahd, scb);
}

static __inline void
ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_handle_scb_status(ahd, scb);
	else
		ahd_done(ahd, scb);
}

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static __inline void
ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_calc_residual(ahd, scb);
}

/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
static __inline struct ahd_initiator_tinfo *
ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
		    u_int remote_id, struct ahd_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahd->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}

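/*
 * Copy the fields that make up an SCB's "column index" (the target's
 * SCSI ID and the LUN) from the src SCB to the dst SCB.
 */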
#define AHD_COPY_COL_IDX(dst, src)				\
do {								\
	dst->hscb->scsiid = src->hscb->scsiid;			\
	dst->hscb->lun = src->hscb->lun;			\
} while (0)

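/*
 * Composite register accessors.  Wide registers are built up from
 * individual byte accesses; writes go out least-significant byte first.
 */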
static __inline uint16_t
ahd_inw(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port));
}

static __inline void
ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
}

static __inline uint32_t
ahd_inl(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (ahd_inb(ahd, port+3) << 24));
}

static __inline void
ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
{
	ahd_outb(ahd, port, (value) & 0xFF);
	ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
	ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
	ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
}

static __inline uint64_t
ahd_inq(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (ahd_inb(ahd, port+3) << 24)
	      | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
	      | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
	      | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
	      | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
}

static __inline void
ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
	ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
	ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
	ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
	ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
	ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
	ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
}

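/*
 * SCBPTR is a two-byte register and, per the assertions below, must not
 * be accessed while the mode pointer is in an unknown or configuration
 * mode.
 */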
static __inline u_int
ahd_get_scbptr(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
}

static __inline void
ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
	ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
}

static __inline u_int
ahd_get_hnscb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inw_atomic(ahd, HNSCB_QOFF));
}

static __inline void
ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outw_atomic(ahd, HNSCB_QOFF, value);
}

static __inline u_int
ahd_get_hescb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inb(ahd, HESCB_QOFF));
}

static __inline void
ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outb(ahd, HESCB_QOFF, value);
}

static __inline u_int
ahd_get_snscb_qoff(struct ahd_softc *ahd)
{
	u_int oldvalue;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	oldvalue = ahd_inw(ahd, SNSCB_QOFF);
	ahd_outw(ahd, SNSCB_QOFF, oldvalue);
	return (oldvalue);
}

static __inline void
ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outw(ahd, SNSCB_QOFF, value);
}

static __inline u_int
ahd_get_sescb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SESCB_QOFF));
}

static __inline void
ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SESCB_QOFF, value);
}

static __inline u_int
ahd_get_sdscb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
}

static __inline void
ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
	ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
}

static __inline u_int
ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
{
	u_int value;

	/*
	 * Workaround PCI-X Rev A. hardware bug.
	 * After a host read of SCB memory, the chip
	 * may become confused into thinking prefetch
	 * was required.  This starts the discard timer
	 * running and can cause an unexpected discard
	 * timer interrupt.  The work around is to read
	 * a normal register prior to the exhaustion of
	 * the discard timer.  The mode pointer register
	 * has no side effects and so serves well for
	 * this purpose.
	 *
	 * Razor #528
	 */
	value = ahd_inb(ahd, offset);
	if ((ahd->flags & AHD_PCIX_SCBRAM_RD_BUG) != 0)
		ahd_inb(ahd, MODE_PTR);
	return (value);
}

static __inline u_int
ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inb_scbram(ahd, offset)
	      | (ahd_inb_scbram(ahd, offset+1) << 8));
}

static __inline uint32_t
ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inb_scbram(ahd, offset)
	      | (ahd_inb_scbram(ahd, offset+1) << 8)
	      | (ahd_inb_scbram(ahd, offset+2) << 16)
	      | (ahd_inb_scbram(ahd, offset+3) << 24));
}

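/*
 * Map an SCB tag back to its in-core SCB.  The hardware SCB is synced
 * for host access before it is returned, so the caller sees its current
 * contents.
 */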
static __inline struct scb *
ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
{
	struct scb* scb;

	if (tag >= AHD_SCB_MAX)
		return (NULL);
	scb = ahd->scb_data.scbindex[tag];
	if (scb != NULL)
		ahd_sync_scb(ahd, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	return (scb);
}

static __inline void
ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	struct map_node *q_hscb_map;
	uint32_t saved_hscb_busaddr;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB (by address) to download,
	 * and we can't disappoint it.  To achieve this, the next
	 * HSCB to download is saved off in ahd->next_queued_hscb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahd->next_queued_hscb;
	q_hscb_map = ahd->next_queued_hscb_map;
	saved_hscb_busaddr = q_hscb->hscb_busaddr;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	q_hscb->hscb_busaddr = saved_hscb_busaddr;
	q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;

	/* Now swap HSCB pointers. */
	ahd->next_queued_hscb = scb->hscb;
	ahd->next_queued_hscb_map = scb->hscb_map;
	scb->hscb = q_hscb;
	scb->hscb_map = q_hscb_map;

	KASSERT((vaddr_t)scb->hscb >= (vaddr_t)scb->hscb_map->vaddr &&
	    (vaddr_t)scb->hscb < (vaddr_t)scb->hscb_map->vaddr + PAGE_SIZE);

	/* Now define the mapping from tag to SCB in the scbindex */
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
}

/*
 * Tell the sequencer about a new transaction to execute.
 */
static __inline void
ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
{
	ahd_swap_with_next_hscb(ahd, scb);

	if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
		panic("Attempt to queue invalid SCB tag %x\n",
		      SCB_GET_TAG(scb));

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;

	if (scb->sg_count != 0)
		ahd_setup_data_scb(ahd, scb);
	else
		ahd_setup_noxfer_scb(ahd, scb);
	ahd_setup_scb_common(ahd, scb);

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
		uint64_t host_dataptr;

		host_dataptr = ahd_le64toh(scb->hscb->dataptr);
		printf("%s: Queueing SCB 0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
		       ahd_name(ahd),
		       SCB_GET_TAG(scb), ahd_le32toh(scb->hscb->hscb_busaddr),
		       (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
		       (u_int)(host_dataptr & 0xFFFFFFFF),
		       ahd_le32toh(scb->hscb->datacnt));
	}
#endif
	/* Tell the adapter about the newly queued SCB */
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
}

static __inline uint8_t *
ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
{
	return (scb->sense_data);
}

static __inline uint32_t
ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
{
	return (scb->sense_busaddr);
}

/************************** Interrupt Processing ******************************/
static __inline void ahd_sync_qoutfifo(struct ahd_softc *, int);
static __inline void ahd_sync_tqinfifo(struct ahd_softc *, int);
static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *);
static __inline int ahd_intr(void *);
static __inline void ahd_minphys(struct buf *);

static __inline void
ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat, ahd->shared_data_map.dmamap,
			/*offset*/0, /*len*/AHD_SCB_MAX * sizeof(uint16_t), op);
}

static __inline void
ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
{
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, 0),
				sizeof(struct target_cmd) * AHD_TMODE_CMDS,
				op);
	}
#endif
}

/*
 * See if the firmware has posted any completed commands
 * into our in-core command complete fifos.
 */
#define AHD_RUN_QOUTFIFO 0x1
#define AHD_RUN_TQINFIFO 0x2
static __inline u_int
ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
{
	u_int retval;

	retval = 0;
	ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
			ahd->shared_data_map.dmamap,
			/*offset*/ahd->qoutfifonext, /*len*/2,
			BUS_DMASYNC_POSTREAD);
	if ((ahd->qoutfifo[ahd->qoutfifonext]
	     & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag)
		retval |= AHD_RUN_QOUTFIFO;
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0
	 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
			retval |= AHD_RUN_TQINFIFO;
	}
#endif
	return (retval);
}

/*
 * Catch an interrupt from the adapter
 */
static __inline int
ahd_intr(void *arg)
{
	struct ahd_softc *ahd = (struct ahd_softc*)arg;
	u_int intstat;

	if ((ahd->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}

	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
	 && (ahd_check_cmdcmpltqueues(ahd) != 0))
		intstat = CMDCMPLT;
	else
		intstat = ahd_inb(ahd, INTSTAT);

	if ((intstat & INT_PEND) == 0)
		return (0);

	if (intstat & CMDCMPLT) {
		ahd_outb(ahd, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
			if (ahd_is_paused(ahd)) {
				/*
				 * Potentially lost SEQINT.
				 * If SEQINTCODE is non-zero,
				 * simulate the SEQINT.
				 */
				if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
					intstat |= SEQINT;
			}
		} else {
			ahd_flush_device_writes(ahd);
		}
		scsipi_channel_freeze(&ahd->sc_channel, 1);
		ahd_run_qoutfifo(ahd);
		scsipi_channel_thaw(&ahd->sc_channel, 1);
		ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
		ahd->cmdcmplt_total++;
#ifdef AHD_TARGET_MODE
		if ((ahd->flags & AHD_TARGETROLE) != 0)
			ahd_run_tqinfifo(ahd, /*paused*/FALSE);
#endif
		if (intstat == CMDCMPLT)
			return 1;
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & HWERRINT) {
		ahd_handle_hwerrint(ahd);
	} else if ((intstat & (PCIINT|SPLTINT)) != 0) {
		ahd->bus_intr(ahd);
	} else {

		if ((intstat & SEQINT) != 0)
			ahd_handle_seqint(ahd, intstat);

		if ((intstat & SCSIINT) != 0)
			ahd_handle_scsiint(ahd, intstat);
	}

	return (1);
}

static __inline void
ahd_minphys(struct buf *bp)
{
	/*
	 * Even though the card can transfer up to 16 megabytes per command,
	 * we are limited by the number of segments in the DMA segment
	 * list that we can hold.  The worst case is that all pages are
	 * discontinuous physically, hence the "page per segment" limit
	 * enforced here.
	 */
	if (bp->b_bcount > AHD_MAXTRANSFER_SIZE) {
		bp->b_bcount = AHD_MAXTRANSFER_SIZE;
	}
	minphys(bp);
}

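/*
 * Extract a 4-byte, big-endian SCSI field (e.g. a transfer length or LBA
 * taken from a CDB) into host byte order.
 */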
static __inline u_int32_t scsi_4btoul(u_int8_t *);

static __inline u_int32_t
scsi_4btoul(u_int8_t *bytes)
{
	u_int32_t rv;

	rv = (bytes[0] << 24) |
	     (bytes[1] << 16) |
	     (bytes[2] << 8) |
	     bytes[3];
	return (rv);
}


#endif /* _AIC79XX_INLINE_H_ */