     1 /*	$NetBSD: fwohci.c,v 1.69 2003/01/01 00:10:19 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas of 3am Software Foundry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * IEEE1394 Open Host Controller Interface
41 * based on OHCI Specification 1.1 (January 6, 2000)
    42  * The first version to support the network interface part was written by
43 * Atsushi Onoe <onoe (at) netbsd.org>.
44 */
45
46 /*
    47  * The first version to support the isochronous acquisition part was written
48 * by HAYAKAWA Koichi <haya (at) netbsd.org>.
49 */
50
51 #include <sys/cdefs.h>
52 __KERNEL_RCSID(0, "$NetBSD: fwohci.c,v 1.69 2003/01/01 00:10:19 thorpej Exp $");
53
54 #define FWOHCI_WAIT_DEBUG 1
55
56 #define FWOHCI_IT_BUFNUM 4
57
58 #include "opt_inet.h"
59 #include "fwiso.h"
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/kthread.h>
64 #include <sys/socket.h>
65 #include <sys/callout.h>
66 #include <sys/device.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
69 #include <sys/mbuf.h>
70 #include <sys/poll.h>
71 #include <sys/select.h>
72
73 #if __NetBSD_Version__ >= 105010000
74 #include <uvm/uvm_extern.h>
75 #else
76 #include <vm/vm.h>
77 #endif
78
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81
82 #include <dev/ieee1394/ieee1394reg.h>
83 #include <dev/ieee1394/fwohcireg.h>
84
85 #include <dev/ieee1394/ieee1394var.h>
86 #include <dev/ieee1394/fwohcivar.h>
87 #include <dev/ieee1394/fwisovar.h>
88
89 static const char * const ieee1394_speeds[] = { IEEE1394_SPD_STRINGS };
90
91 #if 0
92 static int fwohci_dnamem_alloc(struct fwohci_softc *sc, int size,
93 int alignment, bus_dmamap_t *mapp, caddr_t *kvap, int flags);
94 #endif
95 static void fwohci_create_event_thread(void *);
96 static void fwohci_thread_init(void *);
97
98 static void fwohci_event_thread(struct fwohci_softc *);
99 static void fwohci_hw_init(struct fwohci_softc *);
100 static void fwohci_power(int, void *);
101 static void fwohci_shutdown(void *);
102
103 static int fwohci_desc_alloc(struct fwohci_softc *);
104 static struct fwohci_desc *fwohci_desc_get(struct fwohci_softc *, int);
105 static void fwohci_desc_put(struct fwohci_softc *, struct fwohci_desc *, int);
106
107 static int fwohci_ctx_alloc(struct fwohci_softc *, struct fwohci_ctx **,
108 int, int, int);
109 static void fwohci_ctx_free(struct fwohci_softc *, struct fwohci_ctx *);
110 static void fwohci_ctx_init(struct fwohci_softc *, struct fwohci_ctx *);
111
112 static int fwohci_misc_dmabuf_alloc(bus_dma_tag_t, int, int,
113 bus_dma_segment_t *, bus_dmamap_t *, void **, const char *);
114 static void fwohci_misc_dmabuf_free(bus_dma_tag_t, int, int,
115 bus_dma_segment_t *, bus_dmamap_t *, caddr_t);
116
117 static struct fwohci_ir_ctx *fwohci_ir_ctx_construct(struct fwohci_softc *,
118 int, int, int, int, int, int);
119 static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *);
120
121 static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *);
122 static int fwohci_ir_init(struct fwohci_ir_ctx *);
123 static int fwohci_ir_start(struct fwohci_ir_ctx *);
124 static void fwohci_ir_intr(struct fwohci_softc *, struct fwohci_ir_ctx *);
125 static int fwohci_ir_stop(struct fwohci_ir_ctx *);
126 static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *);
127 #ifdef USEDRAIN
128 static int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *);
129 #endif /* USEDRAIN */
130
131 static int fwohci_it_desc_alloc(struct fwohci_it_ctx *);
132 static void fwohci_it_desc_free(struct fwohci_it_ctx *itc);
133 struct fwohci_it_ctx *fwohci_it_ctx_construct(struct fwohci_softc *,
134 int, int, int, int);
135 void fwohci_it_ctx_destruct(struct fwohci_it_ctx *);
136 int fwohci_it_ctx_writedata(ieee1394_it_tag_t, int,
137 struct ieee1394_it_datalist *, int);
138 static void fwohci_it_ctx_run(struct fwohci_it_ctx *);
139 int fwohci_it_ctx_flush(ieee1394_it_tag_t);
140 static void fwohci_it_intr(struct fwohci_softc *, struct fwohci_it_ctx *);
141
142 int fwohci_itd_construct(struct fwohci_it_ctx *, struct fwohci_it_dmabuf *,
143 int, struct fwohci_desc *, bus_addr_t, int, int, paddr_t);
144 void fwohci_itd_destruct(struct fwohci_it_dmabuf *);
145 static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *);
146 static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *);
147 int fwohci_itd_link(struct fwohci_it_dmabuf *, struct fwohci_it_dmabuf *);
148 int fwohci_itd_unlink(struct fwohci_it_dmabuf *);
149 int fwohci_itd_writedata(struct fwohci_it_dmabuf *, int,
150 struct ieee1394_it_datalist *);
151 int fwohci_itd_isfilled(struct fwohci_it_dmabuf *);
152
153 static int fwohci_buf_alloc(struct fwohci_softc *, struct fwohci_buf *);
154 static void fwohci_buf_free(struct fwohci_softc *, struct fwohci_buf *);
155 static void fwohci_buf_init_rx(struct fwohci_softc *);
156 static void fwohci_buf_start_rx(struct fwohci_softc *);
157 static void fwohci_buf_stop_tx(struct fwohci_softc *);
158 static void fwohci_buf_stop_rx(struct fwohci_softc *);
159 static void fwohci_buf_next(struct fwohci_softc *, struct fwohci_ctx *);
160 static int fwohci_buf_pktget(struct fwohci_softc *, struct fwohci_buf **,
161 caddr_t *, int);
162 static int fwohci_buf_input(struct fwohci_softc *, struct fwohci_ctx *,
163 struct fwohci_pkt *);
164 static int fwohci_buf_input_ppb(struct fwohci_softc *, struct fwohci_ctx *,
165 struct fwohci_pkt *);
166
167 static u_int8_t fwohci_phy_read(struct fwohci_softc *, u_int8_t);
168 static void fwohci_phy_write(struct fwohci_softc *, u_int8_t, u_int8_t);
169 static void fwohci_phy_busreset(struct fwohci_softc *);
170 static void fwohci_phy_input(struct fwohci_softc *, struct fwohci_pkt *);
171
172 static int fwohci_handler_set(struct fwohci_softc *, int, u_int32_t, u_int32_t,
173 u_int32_t, int (*)(struct fwohci_softc *, void *, struct fwohci_pkt *),
174 void *);
175
176 ieee1394_ir_tag_t fwohci_ir_ctx_set(struct device *, int, int, int, int, int);
177 int fwohci_ir_ctx_clear(struct device *, ieee1394_ir_tag_t);
178 int fwohci_ir_read(struct device *, ieee1394_ir_tag_t, struct uio *,
179 int, int);
180 int fwohci_ir_wait(struct device *, ieee1394_ir_tag_t, void *, char *name);
181 int fwohci_ir_select(struct device *, ieee1394_ir_tag_t, struct proc *);
182
183
184
185 ieee1394_it_tag_t fwohci_it_set(struct ieee1394_softc *, int, int);
186 static ieee1394_it_tag_t fwohci_it_ctx_set(struct fwohci_softc *, int, int, int);
187 int fwohci_it_ctx_clear(ieee1394_it_tag_t *);
188
189 static void fwohci_arrq_input(struct fwohci_softc *, struct fwohci_ctx *);
190 static void fwohci_arrs_input(struct fwohci_softc *, struct fwohci_ctx *);
191 static void fwohci_as_input(struct fwohci_softc *, struct fwohci_ctx *);
192
193 static int fwohci_at_output(struct fwohci_softc *, struct fwohci_ctx *,
194 struct fwohci_pkt *);
195 static void fwohci_at_done(struct fwohci_softc *, struct fwohci_ctx *, int);
196 static void fwohci_atrs_output(struct fwohci_softc *, int, struct fwohci_pkt *,
197 struct fwohci_pkt *);
198
199 static int fwohci_guidrom_init(struct fwohci_softc *);
200 static void fwohci_configrom_init(struct fwohci_softc *);
201 static int fwohci_configrom_input(struct fwohci_softc *, void *,
202 struct fwohci_pkt *);
203 static void fwohci_selfid_init(struct fwohci_softc *);
204 static int fwohci_selfid_input(struct fwohci_softc *);
205
206 static void fwohci_csr_init(struct fwohci_softc *);
207 static int fwohci_csr_input(struct fwohci_softc *, void *,
208 struct fwohci_pkt *);
209
210 static void fwohci_uid_collect(struct fwohci_softc *);
211 static void fwohci_uid_req(struct fwohci_softc *, int);
212 static int fwohci_uid_input(struct fwohci_softc *, void *,
213 struct fwohci_pkt *);
214 static int fwohci_uid_lookup(struct fwohci_softc *, const u_int8_t *);
215 static void fwohci_check_nodes(struct fwohci_softc *);
216
217 static int fwohci_if_inreg(struct device *, u_int32_t, u_int32_t,
218 void (*)(struct device *, struct mbuf *));
219 static int fwohci_if_input(struct fwohci_softc *, void *, struct fwohci_pkt *);
220 static int fwohci_if_input_iso(struct fwohci_softc *, void *, struct fwohci_pkt *);
221
222 static int fwohci_if_output(struct device *, struct mbuf *,
223 void (*)(struct device *, struct mbuf *));
224 static int fwohci_if_setiso(struct device *, u_int32_t, u_int32_t, u_int32_t,
225 void (*)(struct device *, struct mbuf *));
226 static int fwohci_read(struct ieee1394_abuf *);
227 static int fwohci_write(struct ieee1394_abuf *);
228 static int fwohci_read_resp(struct fwohci_softc *, void *, struct fwohci_pkt *);
229 static int fwohci_write_ack(struct fwohci_softc *, void *, struct fwohci_pkt *);
230 static int fwohci_read_multi_resp(struct fwohci_softc *, void *,
231 struct fwohci_pkt *);
232 static int fwohci_inreg(struct ieee1394_abuf *, int);
233 static int fwohci_unreg(struct ieee1394_abuf *, int);
234 static int fwohci_parse_input(struct fwohci_softc *, void *,
235 struct fwohci_pkt *);
236 static int fwohci_submatch(struct device *, struct cfdata *, void *);
237
238 /* XXX */
239 u_int16_t fwohci_cycletimer(struct fwohci_softc *);
240 u_int16_t fwohci_it_cycletimer(ieee1394_it_tag_t);
241
242 #ifdef FW_DEBUG
243 static void fwohci_show_intr(struct fwohci_softc *, u_int32_t);
244 static void fwohci_show_phypkt(struct fwohci_softc *, u_int32_t);
245
246 /* 1 is normal debug, 2 is verbose debug, 3 is complete (packet dumps). */
247
248 #define DPRINTF(x) if (fwdebug) printf x
249 #define DPRINTFN(n,x) if (fwdebug>(n)) printf x
250 int fwdebug = 1;
251 #else
252 #define DPRINTF(x)
253 #define DPRINTFN(n,x)
254 #endif
255
256 #define OHCI_ITHEADER_SPD_MASK 0x00070000
257 #define OHCI_ITHEADER_SPD_BITPOS 16
258 #define OHCI_ITHEADER_TAG_MASK 0x0000c000
259 #define OHCI_ITHEADER_TAG_BITPOS 14
260 #define OHCI_ITHEADER_CHAN_MASK 0x00003f00
261 #define OHCI_ITHEADER_CHAN_BITPOS 8
262 #define OHCI_ITHEADER_TCODE_MASK 0x000000f0
263 #define OHCI_ITHEADER_TCODE_BITPOS 4
264 #define OHCI_ITHEADER_SY_MASK 0x0000000f
265 #define OHCI_ITHEADER_SY_BITPOS 0
266
267 #define OHCI_ITHEADER_VAL(fld, val) \
268 (OHCI_ITHEADER_##fld##_MASK & ((val) << OHCI_ITHEADER_##fld##_BITPOS))
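/*
 * Illustrative example of OHCI_ITHEADER_VAL (the values here are arbitrary
 * and not taken from the code below): the first IT header quadlet for a
 * stream packet on channel 63, tag 1, sy 0 at speed code 2 (S400) would be
 * composed as
 *
 *	OHCI_ITHEADER_VAL(SPD, 2) | OHCI_ITHEADER_VAL(TAG, 1) |
 *	    OHCI_ITHEADER_VAL(CHAN, 63) |
 *	    OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA) |
 *	    OHCI_ITHEADER_VAL(SY, 0)
 */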
269
270 int
271 fwohci_init(struct fwohci_softc *sc, const struct evcnt *ev)
272 {
273 int i;
274 u_int32_t val;
275 #if 0
276 int error;
277 #endif
278
279 evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ev,
280 sc->sc_sc1394.sc1394_dev.dv_xname, "intr");
281
282 evcnt_attach_dynamic(&sc->sc_isocnt, EVCNT_TYPE_MISC, ev,
283 sc->sc_sc1394.sc1394_dev.dv_xname, "isorcvs");
284 evcnt_attach_dynamic(&sc->sc_ascnt, EVCNT_TYPE_MISC, ev,
285 sc->sc_sc1394.sc1394_dev.dv_xname, "asrcvs");
286 evcnt_attach_dynamic(&sc->sc_itintrcnt, EVCNT_TYPE_INTR, ev,
287 sc->sc_sc1394.sc1394_dev.dv_xname, "itintr");
288
289 /*
290 * Wait for reset completion
291 */
292 for (i = 0; i < OHCI_LOOP; i++) {
293 val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear);
294 if ((val & OHCI_HCControl_SoftReset) == 0)
295 break;
296 DELAY(10);
297 }
298
299 /* What dialect of OHCI is this device?
300 */
301 val = OHCI_CSR_READ(sc, OHCI_REG_Version);
302 printf("%s: OHCI %u.%u", sc->sc_sc1394.sc1394_dev.dv_xname,
303 OHCI_Version_GET_Version(val), OHCI_Version_GET_Revision(val));
304
305 LIST_INIT(&sc->sc_nodelist);
306
307 if (fwohci_guidrom_init(sc) != 0) {
308 printf("\n%s: fatal: no global UID ROM\n",
309 sc->sc_sc1394.sc1394_dev.dv_xname);
310 return -1;
311 }
312
313 printf(", %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
314 sc->sc_sc1394.sc1394_guid[0], sc->sc_sc1394.sc1394_guid[1],
315 sc->sc_sc1394.sc1394_guid[2], sc->sc_sc1394.sc1394_guid[3],
316 sc->sc_sc1394.sc1394_guid[4], sc->sc_sc1394.sc1394_guid[5],
317 sc->sc_sc1394.sc1394_guid[6], sc->sc_sc1394.sc1394_guid[7]);
318
319 /* Get the maximum link speed and receive size
320 */
321 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
322 sc->sc_sc1394.sc1394_link_speed =
323 OHCI_BITVAL(val, OHCI_BusOptions_LinkSpd);
324 if (sc->sc_sc1394.sc1394_link_speed < IEEE1394_SPD_MAX) {
325 printf(", %s",
326 ieee1394_speeds[sc->sc_sc1394.sc1394_link_speed]);
327 } else {
328 printf(", unknown speed %u", sc->sc_sc1394.sc1394_link_speed);
329 }
330
331 /* MaxRec is encoded as log2(max_rec_octets)-1
332 */
333 sc->sc_sc1394.sc1394_max_receive =
334 1 << (OHCI_BITVAL(val, OHCI_BusOptions_MaxRec) + 1);
335 printf(", %u max_rec", sc->sc_sc1394.sc1394_max_receive);
336
337 /*
338 * Count how many isochronous receive ctx we have.
339 */
340 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0);
341 val = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntMaskClear);
342 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskClear, ~0);
343 for (i = 0; val != 0; val >>= 1) {
344 if (val & 0x1)
345 i++;
346 }
347 sc->sc_isoctx = i;
348 printf(", %d ir_ctx", sc->sc_isoctx);
349
350 /*
351 * Count how many isochronous transmit ctx we have.
352 */
353 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
354 val = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntMaskClear);
355 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskClear, ~0);
356 for (i = 0; val != 0; val >>= 1) {
357 if (val & 0x1) {
358 i++;
   359 			OHCI_SYNC_TX_DMA_WRITE(sc, i, OHCI_SUBREG_CommandPtr, 0);
360 }
361 }
362 sc->sc_itctx = i;
363
364 printf(", %d it_ctx", sc->sc_itctx);
365
366 printf("\n");
367
368 #if 0
369 error = fwohci_dnamem_alloc(sc, OHCI_CONFIG_SIZE,
370 OHCI_CONFIG_ALIGNMENT, &sc->sc_configrom_map,
371 (caddr_t *) &sc->sc_configrom, BUS_DMA_WAITOK|BUS_DMA_COHERENT);
372 return error;
373 #endif
374
375 sc->sc_dying = 0;
376 sc->sc_nodeid = 0xffff; /* invalid */
377
378 sc->sc_sc1394.sc1394_callback.sc1394_read = fwohci_read;
379 sc->sc_sc1394.sc1394_callback.sc1394_write = fwohci_write;
380 sc->sc_sc1394.sc1394_callback.sc1394_inreg = fwohci_inreg;
381 sc->sc_sc1394.sc1394_callback.sc1394_unreg = fwohci_unreg;
382
383 kthread_create(fwohci_create_event_thread, sc);
384 return 0;
385 }
386
387 static int
388 fwohci_if_setiso(struct device *self, u_int32_t channel, u_int32_t tag,
389 u_int32_t direction, void (*handler)(struct device *, struct mbuf *))
390 {
391 struct fwohci_softc *sc = (struct fwohci_softc *)self;
392 int retval;
393 int s;
394
395 if (direction == 1) {
396 return EIO;
397 }
398
399 s = splnet();
400 retval = fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA,
401 channel, 1 << tag, 0, fwohci_if_input_iso, handler);
402 splx(s);
403
404 if (!retval) {
405 printf("%s: dummy iso handler set\n",
406 sc->sc_sc1394.sc1394_dev.dv_xname);
407 } else {
408 printf("%s: dummy iso handler cannot set\n",
409 sc->sc_sc1394.sc1394_dev.dv_xname);
410 }
411
412 return retval;
413 }
414
415 int
416 fwohci_intr(void *arg)
417 {
418 struct fwohci_softc * const sc = arg;
419 int progress = 0;
420 u_int32_t intmask, iso;
421
422 for (;;) {
423 intmask = OHCI_CSR_READ(sc, OHCI_REG_IntEventClear);
424
425 /*
426 * On a bus reset, everything except bus reset gets
427 * cleared. That can't get cleared until the selfid
428 * phase completes (which happens outside the
429 * interrupt routines). So if just a bus reset is left
430 * in the mask and it's already in the sc_intmask,
431 * just return.
432 */
433
434 if ((intmask == 0) ||
435 (progress && (intmask == OHCI_Int_BusReset) &&
436 (sc->sc_intmask & OHCI_Int_BusReset))) {
437 if (progress)
438 wakeup(fwohci_event_thread);
439 return progress;
440 }
441 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
442 intmask & ~OHCI_Int_BusReset);
443 #ifdef FW_DEBUG
444 if (fwdebug > 1)
445 fwohci_show_intr(sc, intmask);
446 #endif
447
448 if (intmask & OHCI_Int_BusReset) {
449 /*
450 * According to OHCI spec 6.1.1 "busReset",
   451 			 * all asynchronous transmit contexts must be
   452 			 * stopped before clearing BusReset.  Moreover, the
   453 			 * BusReset interrupt bit should not be cleared
   454 			 * during the SelfID phase.  Thus we turn off the
   455 			 * BusReset interrupt mask bit instead, until SelfID
   456 			 * completion or SelfID timeout.
457 */
458 intmask &= OHCI_Int_SelfIDComplete;
459 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear,
460 OHCI_Int_BusReset);
461 sc->sc_intmask = OHCI_Int_BusReset;
462 }
463 sc->sc_intmask |= intmask;
464
465 if (intmask & OHCI_Int_IsochTx) {
466 int i;
467
468 iso = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear);
469 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntEventClear, iso);
470
471 sc->sc_itintrcnt.ev_count++;
472 for (i = 0; i < sc->sc_itctx; ++i) {
473 if ((iso & (1<<i)) == 0 ||
474 sc->sc_ctx_it[i] == NULL) {
475 continue;
476 }
477
478 fwohci_it_intr(sc, sc->sc_ctx_it[i]);
479 }
480 }
481 if (intmask & OHCI_Int_IsochRx) {
482 int i;
483
484 iso = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear);
485 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear, iso);
486
487 for (i = 0; i < sc->sc_isoctx; i++) {
488 if ((iso & (1 << i))
489 && sc->sc_ctx_ir[i] != NULL) {
490 iso &= ~(1 << i);
491 fwohci_ir_intr(sc, sc->sc_ctx_ir[i]);
492 }
493 }
494
495 if (iso == 0) {
496 sc->sc_intmask &= ~OHCI_Int_IsochRx;
497 }
498 sc->sc_iso |= iso;
499 }
500
501 if (!progress) {
502 sc->sc_intrcnt.ev_count++;
503 progress = 1;
504 }
505 }
506 }
507
508 static void
509 fwohci_create_event_thread(void *arg)
510 {
511 struct fwohci_softc *sc = arg;
512
513 if (kthread_create1(fwohci_thread_init, sc, &sc->sc_event_thread, "%s",
514 sc->sc_sc1394.sc1394_dev.dv_xname)) {
515 printf("%s: unable to create event thread\n",
516 sc->sc_sc1394.sc1394_dev.dv_xname);
517 panic("fwohci_create_event_thread");
518 }
519 }
520
521 static void
522 fwohci_thread_init(void *arg)
523 {
524 struct fwohci_softc *sc = arg;
525 int i;
526
527 /*
528 * Allocate descriptors
529 */
530 if (fwohci_desc_alloc(sc)) {
531 printf("%s: not enabling interrupts\n",
532 sc->sc_sc1394.sc1394_dev.dv_xname);
533 kthread_exit(1);
534 }
535
536 /*
537 * Enable Link Power
538 */
539
540 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS);
541
542 /*
543 * Allocate DMA Context
544 */
545 fwohci_ctx_alloc(sc, &sc->sc_ctx_arrq, OHCI_BUF_ARRQ_CNT,
546 OHCI_CTX_ASYNC_RX_REQUEST, FWOHCI_CTX_ASYNC);
547 fwohci_ctx_alloc(sc, &sc->sc_ctx_arrs, OHCI_BUF_ARRS_CNT,
548 OHCI_CTX_ASYNC_RX_RESPONSE, FWOHCI_CTX_ASYNC);
549 fwohci_ctx_alloc(sc, &sc->sc_ctx_atrq, 0, OHCI_CTX_ASYNC_TX_REQUEST,
550 FWOHCI_CTX_ASYNC);
551 fwohci_ctx_alloc(sc, &sc->sc_ctx_atrs, 0, OHCI_CTX_ASYNC_TX_RESPONSE,
552 FWOHCI_CTX_ASYNC);
553 sc->sc_ctx_as = malloc(sizeof(sc->sc_ctx_as[0]) * sc->sc_isoctx,
554 M_DEVBUF, M_WAITOK);
555 if (sc->sc_ctx_as == NULL) {
556 printf("no asynchronous stream\n");
557 } else {
558 for (i = 0; i < sc->sc_isoctx; i++)
559 sc->sc_ctx_as[i] = NULL;
560 }
561 sc->sc_ctx_ir = malloc(sizeof(sc->sc_ctx_ir[0]) * sc->sc_isoctx,
562 M_DEVBUF, M_WAITOK|M_ZERO);
563 sc->sc_ctx_it = malloc(sizeof(sc->sc_ctx_it[0]) * sc->sc_itctx,
564 M_DEVBUF, M_WAITOK|M_ZERO);
565
566 /*
567 * Allocate buffer for configuration ROM and SelfID buffer
568 */
569 fwohci_buf_alloc(sc, &sc->sc_buf_cnfrom);
570 fwohci_buf_alloc(sc, &sc->sc_buf_selfid);
571
572 callout_init(&sc->sc_selfid_callout);
573
574 sc->sc_sc1394.sc1394_ifinreg = fwohci_if_inreg;
575 sc->sc_sc1394.sc1394_ifoutput = fwohci_if_output;
576 sc->sc_sc1394.sc1394_ifsetiso = fwohci_if_setiso;
577
578 sc->sc_sc1394.sc1394_ir_open = fwohci_ir_ctx_set;
579 sc->sc_sc1394.sc1394_ir_close = fwohci_ir_ctx_clear;
580 sc->sc_sc1394.sc1394_ir_read = fwohci_ir_read;
581 sc->sc_sc1394.sc1394_ir_wait = fwohci_ir_wait;
582 sc->sc_sc1394.sc1394_ir_select = fwohci_ir_select;
583
584 #if 0
585 sc->sc_sc1394.sc1394_it_open = fwohci_it_open;
586 sc->sc_sc1394.sc1394_it_write = fwohci_it_write;
587 sc->sc_sc1394.sc1394_it_close = fwohci_it_close;
588 /* XXX: need fwohci_it_flush? */
589 #endif
590
591 /*
592 * establish hooks for shutdown and suspend/resume
593 */
594 sc->sc_shutdownhook = shutdownhook_establish(fwohci_shutdown, sc);
595 sc->sc_powerhook = powerhook_establish(fwohci_power, sc);
596
597 sc->sc_sc1394.sc1394_if = config_found(&sc->sc_sc1394.sc1394_dev, "fw",
598 fwohci_print);
599
600 #if NFWISO > 0
601 fwiso_register_if(&sc->sc_sc1394);
602 #endif
603
   604 	/* Main loop.  It does not return in normal operation. */
605
606 fwohci_event_thread(sc);
607
608 kthread_exit(0);
609 }
610
611 static void
612 fwohci_event_thread(struct fwohci_softc *sc)
613 {
614 int i, s;
615 u_int32_t intmask, iso;
616
617 s = splbio();
618
619 /*
620 * Initialize hardware registers.
621 */
622
623 fwohci_hw_init(sc);
624
625 /* Initial Bus Reset */
626 fwohci_phy_busreset(sc);
627 splx(s);
628
629 while (!sc->sc_dying) {
630 s = splbio();
631 intmask = sc->sc_intmask;
632 if (intmask == 0) {
633 tsleep(fwohci_event_thread, PZERO, "fwohciev", 0);
634 splx(s);
635 continue;
636 }
637 sc->sc_intmask = 0;
638 splx(s);
639
640 if (intmask & OHCI_Int_BusReset) {
641 fwohci_buf_stop_tx(sc);
642 if (sc->sc_uidtbl != NULL) {
643 free(sc->sc_uidtbl, M_DEVBUF);
644 sc->sc_uidtbl = NULL;
645 }
646
647 callout_reset(&sc->sc_selfid_callout,
648 OHCI_SELFID_TIMEOUT,
649 (void (*)(void *))fwohci_phy_busreset, sc);
650 sc->sc_nodeid = 0xffff; /* indicate invalid */
651 sc->sc_rootid = 0;
652 sc->sc_irmid = IEEE1394_BCAST_PHY_ID;
653 }
654 if (intmask & OHCI_Int_SelfIDComplete) {
655 s = splbio();
656 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
657 OHCI_Int_BusReset);
658 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet,
659 OHCI_Int_BusReset);
660 splx(s);
661 callout_stop(&sc->sc_selfid_callout);
662 if (fwohci_selfid_input(sc) == 0) {
663 fwohci_buf_start_rx(sc);
664 fwohci_uid_collect(sc);
665 }
666 }
667 if (intmask & OHCI_Int_ReqTxComplete)
668 fwohci_at_done(sc, sc->sc_ctx_atrq, 0);
669 if (intmask & OHCI_Int_RespTxComplete)
670 fwohci_at_done(sc, sc->sc_ctx_atrs, 0);
671 if (intmask & OHCI_Int_RQPkt)
672 fwohci_arrq_input(sc, sc->sc_ctx_arrq);
673 if (intmask & OHCI_Int_RSPkt)
674 fwohci_arrs_input(sc, sc->sc_ctx_arrs);
675 if (intmask & OHCI_Int_IsochRx) {
676 if (sc->sc_ctx_as == NULL) {
677 continue;
678 }
679 s = splbio();
680 iso = sc->sc_iso;
681 sc->sc_iso = 0;
682 splx(s);
683 for (i = 0; i < sc->sc_isoctx; i++) {
684 if ((iso & (1 << i)) &&
685 sc->sc_ctx_as[i] != NULL) {
686 fwohci_as_input(sc, sc->sc_ctx_as[i]);
687 sc->sc_ascnt.ev_count++;
688 }
689 }
690 }
691 }
692 }
693
694 #if 0
695 static int
696 fwohci_dnamem_alloc(struct fwohci_softc *sc, int size, int alignment,
697 bus_dmamap_t *mapp, caddr_t *kvap, int flags)
698 {
699 bus_dma_segment_t segs[1];
700 int error, nsegs, steps;
701
702 steps = 0;
703 error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, alignment,
704 segs, 1, &nsegs, flags);
705 if (error)
706 goto cleanup;
707
708 steps = 1;
709 error = bus_dmamem_map(sc->sc_dmat, segs, nsegs, segs[0].ds_len,
710 kvap, flags);
711 if (error)
712 goto cleanup;
713
714 if (error == 0)
715 error = bus_dmamap_create(sc->sc_dmat, size, 1, alignment,
716 size, flags, mapp);
717 if (error)
718 goto cleanup;
719 if (error == 0)
720 error = bus_dmamap_load(sc->sc_dmat, *mapp, *kvap, size, NULL,
721 flags);
722 if (error)
723 goto cleanup;
724
725 cleanup:
726 switch (steps) {
727 case 1:
728 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
729 }
730
731 return error;
732 }
733 #endif
734
735 int
736 fwohci_print(void *aux, const char *pnp)
737 {
738 char *name = aux;
739
740 if (pnp)
741 aprint_normal("%s at %s", name, pnp);
742
743 return UNCONF;
744 }
745
746 static void
747 fwohci_hw_init(struct fwohci_softc *sc)
748 {
749 int i;
750 u_int32_t val;
751
752 /*
753 * Software Reset.
754 */
755 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset);
756 for (i = 0; i < OHCI_LOOP; i++) {
757 val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear);
758 if ((val & OHCI_HCControl_SoftReset) == 0)
759 break;
760 DELAY(10);
761 }
762
763 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS);
764
765 /*
   766 	 * First, initialize CSRs with undefined values to their defaults.
767 */
768 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
769 val |= OHCI_BusOptions_ISC | OHCI_BusOptions_CMC;
770 #if 0
771 val |= OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC;
772 #else
773 val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC);
774 #endif
775 OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val);
776 for (i = 0; i < sc->sc_isoctx; i++) {
777 OHCI_SYNC_RX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
778 ~0);
779 }
780 for (i = 0; i < sc->sc_itctx; i++) {
781 OHCI_SYNC_TX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
782 ~0);
783 }
784 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear, ~0);
785
786 fwohci_configrom_init(sc);
787 fwohci_selfid_init(sc);
788 fwohci_buf_init_rx(sc);
789 fwohci_csr_init(sc);
790
791 /*
792 * Final CSR settings.
793 */
794 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet,
795 OHCI_LinkControl_CycleTimerEnable |
796 OHCI_LinkControl_RcvSelfID | OHCI_LinkControl_RcvPhyPkt);
797
798 OHCI_CSR_WRITE(sc, OHCI_REG_ATRetries, 0x00000888); /*XXX*/
799
800 /* clear receive filter */
801 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiClear, ~0);
802 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoClear, ~0);
803 OHCI_CSR_WRITE(sc, OHCI_REG_AsynchronousRequestFilterHiSet, 0x80000000);
804
805 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear,
806 OHCI_HCControl_NoByteSwapData | OHCI_HCControl_APhyEnhanceEnable);
807 #if BYTE_ORDER == BIG_ENDIAN
808 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet,
809 OHCI_HCControl_NoByteSwapData);
810 #endif
811
812 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, ~0);
813 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset |
814 OHCI_Int_SelfIDComplete | OHCI_Int_IsochRx | OHCI_Int_IsochTx |
815 OHCI_Int_RSPkt | OHCI_Int_RQPkt | OHCI_Int_ARRS | OHCI_Int_ARRQ |
816 OHCI_Int_RespTxComplete | OHCI_Int_ReqTxComplete);
817 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_CycleTooLong |
818 OHCI_Int_UnrecoverableError | OHCI_Int_CycleInconsistent |
819 OHCI_Int_LockRespErr | OHCI_Int_PostedWriteErr);
820 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
821 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0);
822 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_MasterEnable);
823
824 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LinkEnable);
825
826 /*
827 * Start the receivers
828 */
829 fwohci_buf_start_rx(sc);
830 }
831
832 static void
833 fwohci_power(int why, void *arg)
834 {
835 struct fwohci_softc *sc = arg;
836 int s;
837
838 s = splbio();
839 switch (why) {
840 case PWR_SUSPEND:
841 case PWR_STANDBY:
842 fwohci_shutdown(sc);
843 break;
844 case PWR_RESUME:
845 fwohci_hw_init(sc);
846 fwohci_phy_busreset(sc);
847 break;
848 case PWR_SOFTSUSPEND:
849 case PWR_SOFTSTANDBY:
850 case PWR_SOFTRESUME:
851 break;
852 }
853 splx(s);
854 }
855
856 static void
857 fwohci_shutdown(void *arg)
858 {
859 struct fwohci_softc *sc = arg;
860 u_int32_t val;
861
862 callout_stop(&sc->sc_selfid_callout);
   863 	/* disable all interrupts */
864 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, OHCI_Int_MasterEnable);
865 fwohci_buf_stop_tx(sc);
866 fwohci_buf_stop_rx(sc);
867 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
868 val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_ISC |
869 OHCI_BusOptions_CMC | OHCI_BusOptions_IRMC);
870 OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val);
871 fwohci_phy_busreset(sc);
872 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LinkEnable);
873 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LPS);
874 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset);
875 }
876
877 /*
878 * COMMON FUNCTIONS
879 */
880
881 /*
882 * read the PHY Register.
883 */
884 static u_int8_t
885 fwohci_phy_read(struct fwohci_softc *sc, u_int8_t reg)
886 {
887 int i;
888 u_int32_t val;
889
890 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl,
891 OHCI_PhyControl_RdReg | (reg << OHCI_PhyControl_RegAddr_BITPOS));
892 for (i = 0; i < OHCI_LOOP; i++) {
893 if (OHCI_CSR_READ(sc, OHCI_REG_PhyControl) &
894 OHCI_PhyControl_RdDone)
895 break;
896 DELAY(10);
897 }
898 val = OHCI_CSR_READ(sc, OHCI_REG_PhyControl);
899 return (val & OHCI_PhyControl_RdData) >> OHCI_PhyControl_RdData_BITPOS;
900 }
901
902 /*
903 * write the PHY Register.
904 */
905 static void
906 fwohci_phy_write(struct fwohci_softc *sc, u_int8_t reg, u_int8_t val)
907 {
908 int i;
909
910 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl, OHCI_PhyControl_WrReg |
911 (reg << OHCI_PhyControl_RegAddr_BITPOS) |
912 (val << OHCI_PhyControl_WrData_BITPOS));
913 for (i = 0; i < OHCI_LOOP; i++) {
914 if (!(OHCI_CSR_READ(sc, OHCI_REG_PhyControl) &
915 OHCI_PhyControl_WrReg))
916 break;
917 DELAY(10);
918 }
919 }
920
921 /*
922 * Initiate Bus Reset
923 */
924 static void
925 fwohci_phy_busreset(struct fwohci_softc *sc)
926 {
927 int s;
928 u_int8_t val;
929
930 s = splbio();
931 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
932 OHCI_Int_BusReset | OHCI_Int_SelfIDComplete);
933 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset);
934 callout_stop(&sc->sc_selfid_callout);
935 val = fwohci_phy_read(sc, 1);
936 val = (val & 0x80) | /* preserve RHB (force root) */
937 0x40 | /* Initiate Bus Reset */
938 0x3f; /* default GAP count */
939 fwohci_phy_write(sc, 1, val);
940 splx(s);
941 }
942
943 /*
944 * PHY Packet
945 */
946 static void
947 fwohci_phy_input(struct fwohci_softc *sc, struct fwohci_pkt *pkt)
948 {
949 u_int32_t val;
950
951 val = pkt->fp_hdr[1];
952 if (val != ~pkt->fp_hdr[2]) {
953 if (val == 0 && ((*pkt->fp_trail & 0x001f0000) >> 16) ==
954 OHCI_CTXCTL_EVENT_BUS_RESET) {
955 DPRINTFN(1, ("fwohci_phy_input: BusReset: 0x%08x\n",
956 pkt->fp_hdr[2]));
957 } else {
958 printf("%s: phy packet corrupted (0x%08x, 0x%08x)\n",
959 sc->sc_sc1394.sc1394_dev.dv_xname, val,
960 pkt->fp_hdr[2]);
961 }
962 return;
963 }
964 #ifdef FW_DEBUG
965 if (fwdebug > 1)
966 fwohci_show_phypkt(sc, val);
967 #endif
968 }
969
970 /*
971 * Descriptor for context DMA.
972 */
973 static int
974 fwohci_desc_alloc(struct fwohci_softc *sc)
975 {
976 int error, mapsize, dsize;
977
978 /*
979 * allocate descriptor buffer
980 */
981
982 sc->sc_descsize = OHCI_BUF_ARRQ_CNT + OHCI_BUF_ARRS_CNT +
983 OHCI_BUF_ATRQ_CNT + OHCI_BUF_ATRS_CNT +
984 OHCI_BUF_IR_CNT * sc->sc_isoctx + 2;
985 dsize = sizeof(struct fwohci_desc) * sc->sc_descsize;
986 mapsize = howmany(sc->sc_descsize, NBBY);
987 sc->sc_descmap = malloc(mapsize, M_DEVBUF, M_WAITOK|M_ZERO);
988
989 if (sc->sc_descmap == NULL) {
990 printf("fwohci_desc_alloc: cannot get memory\n");
991 return -1;
992 }
993
994 if ((error = bus_dmamem_alloc(sc->sc_dmat, dsize, PAGE_SIZE, 0,
995 &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) {
996 printf("%s: unable to allocate descriptor buffer, error = %d\n",
997 sc->sc_sc1394.sc1394_dev.dv_xname, error);
998 goto fail_0;
999 }
1000
1001 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg,
1002 dsize, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT | BUS_DMA_WAITOK))
1003 != 0) {
1004 printf("%s: unable to map descriptor buffer, error = %d\n",
1005 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1006 goto fail_1;
1007 }
1008
1009 if ((error = bus_dmamap_create(sc->sc_dmat, dsize, sc->sc_dnseg,
1010 dsize, 0, BUS_DMA_WAITOK, &sc->sc_ddmamap)) != 0) {
1011 printf("%s: unable to create descriptor buffer DMA map, "
1012 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error);
1013 goto fail_2;
1014 }
1015
1016 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc,
1017 dsize, NULL, BUS_DMA_WAITOK)) != 0) {
1018 printf("%s: unable to load descriptor buffer DMA map, "
1019 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error);
1020 goto fail_3;
1021 }
1022
1023 return 0;
1024
1025 fail_3:
1026 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1027 fail_2:
1028 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, dsize);
1029 fail_1:
1030 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1031 fail_0:
1032 return error;
1033 }
1034
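/*
 * Find `ndesc' contiguous free descriptors by a first-fit scan of the
 * allocation bitmap, mark them in use and return a pointer to the first
 * one.  Returns NULL if no sufficiently long free run exists.
 */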
1035 static struct fwohci_desc *
1036 fwohci_desc_get(struct fwohci_softc *sc, int ndesc)
1037 {
1038 int i, n;
1039
1040 for (n = 0; n <= sc->sc_descsize - ndesc; n++) {
1041 for (i = 0; ; i++) {
1042 if (i == ndesc) {
1043 for (i = 0; i < ndesc; i++)
1044 setbit(sc->sc_descmap, n + i);
1045 return sc->sc_desc + n;
1046 }
1047 if (isset(sc->sc_descmap, n + i))
1048 break;
1049 }
1050 }
1051 return NULL;
1052 }
1053
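/*
 * Return `ndesc' descriptors starting at `fd' to the allocation bitmap.
 * Under DIAGNOSTIC a duplicated free causes a panic.
 */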
1054 static void
1055 fwohci_desc_put(struct fwohci_softc *sc, struct fwohci_desc *fd, int ndesc)
1056 {
1057 int i, n;
1058
1059 n = fd - sc->sc_desc;
1060 for (i = 0; i < ndesc; i++, n++) {
1061 #ifdef DIAGNOSTIC
1062 if (isclr(sc->sc_descmap, n))
1063 panic("fwohci_desc_put: duplicated free");
1064 #endif
1065 clrbit(sc->sc_descmap, n);
1066 }
1067 }
1068
1069 /*
  1070  * Asynchronous/Isochronous Transmit/Receive Context
1071 */
1072 static int
1073 fwohci_ctx_alloc(struct fwohci_softc *sc, struct fwohci_ctx **fcp,
1074 int bufcnt, int ctx, int ctxtype)
1075 {
1076 int i, error;
1077 struct fwohci_ctx *fc;
1078 struct fwohci_buf *fb;
1079 struct fwohci_desc *fd;
1080 #if DOUBLEBUF
1081 int buf2cnt;
1082 #endif
1083
1084 fc = malloc(sizeof(*fc), M_DEVBUF, M_WAITOK|M_ZERO);
1085 LIST_INIT(&fc->fc_handler);
1086 TAILQ_INIT(&fc->fc_buf);
1087 fc->fc_ctx = ctx;
1088 fc->fc_buffers = fb = malloc(sizeof(*fb) * bufcnt, M_DEVBUF, M_WAITOK|M_ZERO);
1089 fc->fc_bufcnt = bufcnt;
1090 #if DOUBLEBUF
1091 TAILQ_INIT(&fc->fc_buf2); /* for isochronous */
1092 if (ctxtype == FWOHCI_CTX_ISO_MULTI) {
1093 buf2cnt = bufcnt/2;
1094 bufcnt -= buf2cnt;
1095 if (buf2cnt == 0) {
1096 panic("cannot allocate iso buffer");
1097 }
1098 }
1099 #endif
1100 for (i = 0; i < bufcnt; i++, fb++) {
1101 if ((error = fwohci_buf_alloc(sc, fb)) != 0)
1102 goto fail;
1103 if ((fd = fwohci_desc_get(sc, 1)) == NULL) {
1104 error = ENOBUFS;
1105 goto fail;
1106 }
1107 fb->fb_desc = fd;
1108 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
1109 ((caddr_t)fd - (caddr_t)sc->sc_desc);
1110 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS |
1111 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH;
1112 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len;
1113 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr;
1114 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
1115 }
1116 #if DOUBLEBUF
1117 if (ctxtype == FWOHCI_CTX_ISO_MULTI) {
1118 for (i = bufcnt; i < bufcnt + buf2cnt; i++, fb++) {
1119 if ((error = fwohci_buf_alloc(sc, fb)) != 0)
1120 goto fail;
1121 if ((fd = fwohci_desc_get(sc, 1)) == NULL) {
1122 error = ENOBUFS;
1123 goto fail;
1124 }
1125 fb->fb_desc = fd;
1126 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
1127 ((caddr_t)fd - (caddr_t)sc->sc_desc);
1128 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1129 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1130 BUS_DMASYNC_PREWRITE);
1131 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS |
1132 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH;
1133 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len;
1134 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr;
1135 TAILQ_INSERT_TAIL(&fc->fc_buf2, fb, fb_list);
1136 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1137 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1138 BUS_DMASYNC_POSTWRITE);
1139 }
1140 }
1141 #endif /* DOUBLEBUF */
1142 fc->fc_type = ctxtype;
1143 *fcp = fc;
1144 return 0;
1145
1146 fail:
1147 while (i-- > 0) {
1148 fb--;
1149 if (fb->fb_desc)
1150 fwohci_desc_put(sc, fb->fb_desc, 1);
1151 fwohci_buf_free(sc, fb);
1152 }
1153 free(fc, M_DEVBUF);
1154 return error;
1155 }
1156
1157 static void
1158 fwohci_ctx_free(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1159 {
1160 struct fwohci_buf *fb;
1161 struct fwohci_handler *fh;
1162
1163 #if DOUBLEBUF
1164 if ((fc->fc_type == FWOHCI_CTX_ISO_MULTI) &&
1165 (TAILQ_FIRST(&fc->fc_buf) > TAILQ_FIRST(&fc->fc_buf2))) {
1166 struct fwohci_buf_s fctmp;
1167
1168 fctmp = fc->fc_buf;
1169 fc->fc_buf = fc->fc_buf2;
1170 fc->fc_buf2 = fctmp;
1171 }
1172 #endif
1173 while ((fh = LIST_FIRST(&fc->fc_handler)) != NULL)
1174 fwohci_handler_set(sc, fh->fh_tcode, fh->fh_key1, fh->fh_key2,
1175 fh->fh_key3, NULL, NULL);
1176 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
1177 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
1178 if (fb->fb_desc)
1179 fwohci_desc_put(sc, fb->fb_desc, 1);
1180 fwohci_buf_free(sc, fb);
1181 }
1182 #if DOUBLEBUF
1183 while ((fb = TAILQ_FIRST(&fc->fc_buf2)) != NULL) {
1184 TAILQ_REMOVE(&fc->fc_buf2, fb, fb_list);
1185 if (fb->fb_desc)
1186 fwohci_desc_put(sc, fb->fb_desc, 1);
1187 fwohci_buf_free(sc, fb);
1188 }
1189 #endif /* DOUBLEBUF */
1190 free(fc->fc_buffers, M_DEVBUF);
1191 free(fc, M_DEVBUF);
1192 }
1193
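/*
 * (Re)initialize a context: reset the buffer offsets, rebuild the
 * descriptor branch chain and program CommandPtr.  For isochronous
 * receive contexts the ContextControl mode bits and ContextMatch are
 * also set up, and for IEEE1394_ISO_CHANNEL_ANY the multi-channel mask
 * registers are opened for all channels.
 */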
1194 static void
1195 fwohci_ctx_init(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1196 {
1197 struct fwohci_buf *fb, *nfb;
1198 struct fwohci_desc *fd;
1199 struct fwohci_handler *fh;
1200 int n;
1201
1202 for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL; fb = nfb) {
1203 nfb = TAILQ_NEXT(fb, fb_list);
1204 fb->fb_off = 0;
1205 fd = fb->fb_desc;
1206 fd->fd_branch = (nfb != NULL) ? (nfb->fb_daddr | 1) : 0;
1207 fd->fd_rescount = fd->fd_reqcount;
1208 }
1209
1210 #if DOUBLEBUF
1211 for (fb = TAILQ_FIRST(&fc->fc_buf2); fb != NULL; fb = nfb) {
1212 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1213 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1214 BUS_DMASYNC_PREWRITE);
1215 nfb = TAILQ_NEXT(fb, fb_list);
1216 fb->fb_off = 0;
1217 fd = fb->fb_desc;
1218 fd->fd_branch = (nfb != NULL) ? (nfb->fb_daddr | 1) : 0;
1219 fd->fd_rescount = fd->fd_reqcount;
1220 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1221 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1222 BUS_DMASYNC_POSTWRITE);
1223 }
1224 #endif /* DOUBLEBUF */
1225
1226 n = fc->fc_ctx;
1227 fb = TAILQ_FIRST(&fc->fc_buf);
1228 if (fc->fc_type != FWOHCI_CTX_ASYNC) {
1229 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr,
1230 fb->fb_daddr | 1);
1231 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear,
1232 OHCI_CTXCTL_RX_BUFFER_FILL |
1233 OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE |
1234 OHCI_CTXCTL_RX_MULTI_CHAN_MODE |
1235 OHCI_CTXCTL_RX_DUAL_BUFFER_MODE);
1236 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet,
1237 OHCI_CTXCTL_RX_ISOCH_HEADER);
1238 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
1239 OHCI_SYNC_RX_DMA_WRITE(sc, n,
1240 OHCI_SUBREG_ContextControlSet,
1241 OHCI_CTXCTL_RX_BUFFER_FILL);
1242 }
1243 fh = LIST_FIRST(&fc->fc_handler);
1244
1245 if (fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) {
1246 OHCI_SYNC_RX_DMA_WRITE(sc, n,
1247 OHCI_SUBREG_ContextControlSet,
1248 OHCI_CTXCTL_RX_MULTI_CHAN_MODE);
1249
1250 /* Receive all the isochronous channels */
1251 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet,
1252 0xffffffff);
1253 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet,
1254 0xffffffff);
1255 DPRINTF(("%s: CTXCTL 0x%08x\n",
1256 sc->sc_sc1394.sc1394_dev.dv_xname,
1257 OHCI_SYNC_RX_DMA_READ(sc, n,
1258 OHCI_SUBREG_ContextControlSet)));
1259 }
1260 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch,
1261 (fh->fh_key2 << OHCI_CTXMATCH_TAG_BITPOS) |
1262 (fh->fh_key1 & IEEE1394_ISO_CHANNEL_MASK));
1263 } else {
1264 OHCI_ASYNC_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr,
1265 fb->fb_daddr | 1);
1266 }
1267 }
1268
1269 /*
1270 * DMA data buffer
1271 */
1272 static int
1273 fwohci_buf_alloc(struct fwohci_softc *sc, struct fwohci_buf *fb)
1274 {
1275 int error;
1276
1277 if ((error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1278 PAGE_SIZE, &fb->fb_seg, 1, &fb->fb_nseg, BUS_DMA_WAITOK)) != 0) {
1279 printf("%s: unable to allocate buffer, error = %d\n",
1280 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1281 goto fail_0;
1282 }
1283
1284 if ((error = bus_dmamem_map(sc->sc_dmat, &fb->fb_seg,
1285 fb->fb_nseg, PAGE_SIZE, &fb->fb_buf, BUS_DMA_WAITOK)) != 0) {
1286 printf("%s: unable to map buffer, error = %d\n",
1287 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1288 goto fail_1;
1289 }
1290
1291 if ((error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, fb->fb_nseg,
1292 PAGE_SIZE, 0, BUS_DMA_WAITOK, &fb->fb_dmamap)) != 0) {
1293 printf("%s: unable to create buffer DMA map, "
1294 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname,
1295 error);
1296 goto fail_2;
1297 }
1298
1299 if ((error = bus_dmamap_load(sc->sc_dmat, fb->fb_dmamap,
1300 fb->fb_buf, PAGE_SIZE, NULL, BUS_DMA_WAITOK)) != 0) {
1301 printf("%s: unable to load buffer DMA map, "
1302 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname,
1303 error);
1304 goto fail_3;
1305 }
1306
1307 return 0;
1308
1309 bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap);
1310 fail_3:
1311 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
1312 fail_2:
1313 bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE);
1314 fail_1:
1315 bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg);
1316 fail_0:
1317 return error;
1318 }
1319
1320 static void
1321 fwohci_buf_free(struct fwohci_softc *sc, struct fwohci_buf *fb)
1322 {
1323
1324 bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap);
1325 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
1326 bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE);
1327 bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg);
1328 }
1329
1330 static void
1331 fwohci_buf_init_rx(struct fwohci_softc *sc)
1332 {
1333 int i;
1334
1335 /*
1336 * Initialize for Asynchronous Receive Queue.
1337 */
1338 fwohci_ctx_init(sc, sc->sc_ctx_arrq);
1339 fwohci_ctx_init(sc, sc->sc_ctx_arrs);
1340
1341 /*
1342 * Initialize for Isochronous Receive Queue.
1343 */
1344 if (sc->sc_ctx_as != NULL) {
1345 for (i = 0; i < sc->sc_isoctx; i++) {
1346 if (sc->sc_ctx_as[i] != NULL)
1347 fwohci_ctx_init(sc, sc->sc_ctx_as[i]);
1348 }
1349 }
1350 }
1351
1352 static void
1353 fwohci_buf_start_rx(struct fwohci_softc *sc)
1354 {
1355 int i;
1356
1357 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST,
1358 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1359 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
1360 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1361 if (sc->sc_ctx_as != NULL) {
1362 for (i = 0; i < sc->sc_isoctx; i++) {
1363 if (sc->sc_ctx_as[i] != NULL)
1364 OHCI_SYNC_RX_DMA_WRITE(sc, i,
1365 OHCI_SUBREG_ContextControlSet,
1366 OHCI_CTXCTL_RUN);
1367 }
1368 }
1369 }
1370
1371 static void
1372 fwohci_buf_stop_tx(struct fwohci_softc *sc)
1373 {
1374 int i;
1375
1376 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_REQUEST,
1377 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1378 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
1379 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1380
1381 /*
1382 * Make sure the transmitter is stopped.
1383 */
1384 for (i = 0; i < OHCI_LOOP; i++) {
1385 DELAY(10);
1386 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST,
1387 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)
1388 continue;
1389 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
1390 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)
1391 continue;
1392 break;
1393 }
1394
1395 /*
1396 * Initialize for Asynchronous Transmit Queue.
1397 */
1398 fwohci_at_done(sc, sc->sc_ctx_atrq, 1);
1399 fwohci_at_done(sc, sc->sc_ctx_atrs, 1);
1400 }
1401
1402 static void
1403 fwohci_buf_stop_rx(struct fwohci_softc *sc)
1404 {
1405 int i;
1406
1407 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST,
1408 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1409 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
1410 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1411 for (i = 0; i < sc->sc_isoctx; i++) {
1412 OHCI_SYNC_RX_DMA_WRITE(sc, i,
1413 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1414 }
1415 }
1416
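/*
 * Recycle fully consumed receive buffers: completed buffers are moved
 * from the head to the tail of the buffer list and the descriptor
 * branch chain is relinked.  For ISO_MULTI (buffer-fill) contexts all
 * buffers are reset and the two buffer lists are swapped instead.
 */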
1417 static void
1418 fwohci_buf_next(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1419 {
1420 struct fwohci_buf *fb, *tfb;
1421
1422 #if DOUBLEBUF
1423 if (fc->fc_type != FWOHCI_CTX_ISO_MULTI) {
1424 #endif
1425 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
1426 if (fc->fc_type) {
1427 if (fb->fb_off == 0)
1428 break;
1429 } else {
1430 if (fb->fb_off != fb->fb_desc->fd_reqcount ||
1431 fb->fb_desc->fd_rescount != 0)
1432 break;
1433 }
1434 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
1435 fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount;
1436 fb->fb_off = 0;
1437 fb->fb_desc->fd_branch = 0;
1438 tfb = TAILQ_LAST(&fc->fc_buf, fwohci_buf_s);
1439 tfb->fb_desc->fd_branch = fb->fb_daddr | 1;
1440 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
1441 }
1442 #if DOUBLEBUF
1443 } else {
1444 struct fwohci_buf_s fctmp;
1445
1446 /* cleaning buffer */
1447 for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL;
1448 fb = TAILQ_NEXT(fb, fb_list)) {
1449 fb->fb_off = 0;
1450 fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount;
1451 }
1452
1453 /* rotating buffer */
1454 fctmp = fc->fc_buf;
1455 fc->fc_buf = fc->fc_buf2;
1456 fc->fc_buf2 = fctmp;
1457 }
1458 #endif
1459 }
1460
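/*
 * Fetch up to `len' bytes of received data from the current buffer.
 * Advances *fbp to the next buffer when the current one is exhausted.
 * Returns the number of bytes made available at *pp (0 if none);
 * consumption is rounded up to a quadlet boundary.
 */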
1461 static int
1462 fwohci_buf_pktget(struct fwohci_softc *sc, struct fwohci_buf **fbp, caddr_t *pp,
1463 int len)
1464 {
1465 struct fwohci_buf *fb;
1466 struct fwohci_desc *fd;
1467 int bufend;
1468
1469 fb = *fbp;
1470 again:
1471 fd = fb->fb_desc;
1472 DPRINTFN(1, ("fwohci_buf_pktget: desc %ld, off %d, req %d, res %d,"
1473 " len %d, avail %d\n", (long)(fd - sc->sc_desc), fb->fb_off,
1474 fd->fd_reqcount, fd->fd_rescount, len,
1475 fd->fd_reqcount - fd->fd_rescount - fb->fb_off));
1476 bufend = fd->fd_reqcount - fd->fd_rescount;
1477 if (fb->fb_off >= bufend) {
1478 DPRINTFN(5, ("buf %x finish req %d res %d off %d ",
1479 fb->fb_desc->fd_data, fd->fd_reqcount, fd->fd_rescount,
1480 fb->fb_off));
1481 if (fd->fd_rescount == 0) {
1482 *fbp = fb = TAILQ_NEXT(fb, fb_list);
1483 if (fb != NULL)
1484 goto again;
1485 }
1486 return 0;
1487 }
1488 if (fb->fb_off + len > bufend)
1489 len = bufend - fb->fb_off;
1490 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len,
1491 BUS_DMASYNC_POSTREAD);
1492 *pp = fb->fb_buf + fb->fb_off;
1493 fb->fb_off += roundup(len, 4);
1494 return len;
1495 }
1496
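/*
 * Assemble one received packet (header, payload iovecs and trailer
 * quadlet) from a buffer-fill receive context.  Returns 1 if a complete
 * packet was extracted, 0 otherwise.
 */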
1497 static int
1498 fwohci_buf_input(struct fwohci_softc *sc, struct fwohci_ctx *fc,
1499 struct fwohci_pkt *pkt)
1500 {
1501 caddr_t p;
1502 struct fwohci_buf *fb;
1503 int len, count, i;
1504 #ifdef FW_DEBUG
1505 int tlabel;
1506 #endif
1507
1508 memset(pkt, 0, sizeof(*pkt));
1509 pkt->fp_uio.uio_iov = pkt->fp_iov;
1510 pkt->fp_uio.uio_rw = UIO_WRITE;
1511 pkt->fp_uio.uio_segflg = UIO_SYSSPACE;
1512
1513 /* get first quadlet */
1514 fb = TAILQ_FIRST(&fc->fc_buf);
1515 count = 4;
1516 len = fwohci_buf_pktget(sc, &fb, &p, count);
1517 if (len <= 0) {
1518 DPRINTFN(1, ("fwohci_buf_input: no input for %d\n",
1519 fc->fc_ctx));
1520 return 0;
1521 }
1522 pkt->fp_hdr[0] = *(u_int32_t *)p;
1523 pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4;
1524 switch (pkt->fp_tcode) {
1525 case IEEE1394_TCODE_WRITE_REQ_QUAD:
1526 case IEEE1394_TCODE_READ_RESP_QUAD:
1527 pkt->fp_hlen = 12;
1528 pkt->fp_dlen = 4;
1529 break;
1530 case IEEE1394_TCODE_READ_REQ_BLOCK:
1531 pkt->fp_hlen = 16;
1532 pkt->fp_dlen = 0;
1533 break;
1534 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
1535 case IEEE1394_TCODE_READ_RESP_BLOCK:
1536 case IEEE1394_TCODE_LOCK_REQ:
1537 case IEEE1394_TCODE_LOCK_RESP:
1538 pkt->fp_hlen = 16;
1539 break;
1540 case IEEE1394_TCODE_STREAM_DATA:
1541 #ifdef DIAGNOSTIC
1542 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI)
1543 #endif
1544 {
1545 pkt->fp_hlen = 4;
1546 pkt->fp_dlen = pkt->fp_hdr[0] >> 16;
1547 DPRINTFN(5, ("[%d]", pkt->fp_dlen));
1548 break;
1549 }
1550 #ifdef DIAGNOSTIC
1551 else {
1552 printf("fwohci_buf_input: bad tcode: STREAM_DATA\n");
1553 return 0;
1554 }
1555 #endif
1556 default:
1557 pkt->fp_hlen = 12;
1558 pkt->fp_dlen = 0;
1559 break;
1560 }
1561
1562 /* get header */
1563 while (count < pkt->fp_hlen) {
1564 len = fwohci_buf_pktget(sc, &fb, &p, pkt->fp_hlen - count);
1565 if (len == 0) {
1566 printf("fwohci_buf_input: malformed input 1: %d\n",
1567 pkt->fp_hlen - count);
1568 return 0;
1569 }
1570 memcpy((caddr_t)pkt->fp_hdr + count, p, len);
1571 count += len;
1572 }
1573 if (pkt->fp_hlen == 16 &&
1574 pkt->fp_tcode != IEEE1394_TCODE_READ_REQ_BLOCK)
1575 pkt->fp_dlen = pkt->fp_hdr[3] >> 16;
1576 #ifdef FW_DEBUG
1577 tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10;
1578 #endif
1579 DPRINTFN(1, ("fwohci_buf_input: tcode=0x%x, tlabel=0x%x, hlen=%d, "
1580 "dlen=%d\n", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen));
1581
1582 /* get data */
1583 count = 0;
1584 i = 0;
1585 while (count < pkt->fp_dlen) {
1586 len = fwohci_buf_pktget(sc, &fb,
1587 (caddr_t *)&pkt->fp_iov[i].iov_base,
1588 pkt->fp_dlen - count);
1589 if (len == 0) {
1590 printf("fwohci_buf_input: malformed input 2: %d\n",
1591 pkt->fp_dlen - count);
1592 return 0;
1593 }
1594 pkt->fp_iov[i++].iov_len = len;
1595 count += len;
1596 }
1597 pkt->fp_uio.uio_iovcnt = i;
1598 pkt->fp_uio.uio_resid = count;
1599
1600 /* get trailer */
1601 len = fwohci_buf_pktget(sc, &fb, (caddr_t *)&pkt->fp_trail,
1602 sizeof(*pkt->fp_trail));
1603 if (len <= 0) {
1604 printf("fwohci_buf_input: malformed input 3: %d\n",
1605 pkt->fp_hlen - count);
1606 return 0;
1607 }
1608 return 1;
1609 }
1610
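/*
 * Per-packet-buffer variant: each isochronous packet occupies its own
 * buffer, with the trailer quadlet stored in front of the isochronous
 * header and payload.  ISO_MULTI (buffer-fill) contexts are handled by
 * fwohci_buf_input() instead.
 */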
1611 static int
1612 fwohci_buf_input_ppb(struct fwohci_softc *sc, struct fwohci_ctx *fc,
1613 struct fwohci_pkt *pkt)
1614 {
1615 caddr_t p;
1616 int len;
1617 struct fwohci_buf *fb;
1618 struct fwohci_desc *fd;
1619
1620 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
1621 return fwohci_buf_input(sc, fc, pkt);
1622 }
1623
1624 memset(pkt, 0, sizeof(*pkt));
1625 pkt->fp_uio.uio_iov = pkt->fp_iov;
1626 pkt->fp_uio.uio_rw = UIO_WRITE;
1627 pkt->fp_uio.uio_segflg = UIO_SYSSPACE;
1628
1629 for (fb = TAILQ_FIRST(&fc->fc_buf); ; fb = TAILQ_NEXT(fb, fb_list)) {
1630 if (fb == NULL)
1631 return 0;
1632 if (fb->fb_off == 0)
1633 break;
1634 }
1635 fd = fb->fb_desc;
1636 len = fd->fd_reqcount - fd->fd_rescount;
1637 if (len == 0)
1638 return 0;
1639 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len,
1640 BUS_DMASYNC_POSTREAD);
1641
1642 p = fb->fb_buf;
1643 fb->fb_off += roundup(len, 4);
1644 if (len < 8) {
1645 printf("fwohci_buf_input_ppb: malformed input 1: %d\n", len);
1646 return 0;
1647 }
1648
1649 /*
  1650 	 * Get the trailer first; it may contain bogus data unless the
  1651 	 * status-update bit is set in the descriptor.
1652 */
1653 pkt->fp_trail = (u_int32_t *)p;
1654 *pkt->fp_trail = (*pkt->fp_trail & 0xffff) | (fd->fd_status << 16);
1655 pkt->fp_hdr[0] = ((u_int32_t *)p)[1];
1656 pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4;
1657 #ifdef DIAGNOSTIC
1658 if (pkt->fp_tcode != IEEE1394_TCODE_STREAM_DATA) {
1659 printf("fwohci_buf_input_ppb: bad tcode: 0x%x\n",
1660 pkt->fp_tcode);
1661 return 0;
1662 }
1663 #endif
1664 pkt->fp_hlen = 4;
1665 pkt->fp_dlen = pkt->fp_hdr[0] >> 16;
1666 p += 8;
1667 len -= 8;
1668 if (pkt->fp_dlen != len) {
1669 printf("fwohci_buf_input_ppb: malformed input 2: %d != %d\n",
1670 pkt->fp_dlen, len);
1671 return 0;
1672 }
1673 DPRINTFN(1, ("fwohci_buf_input_ppb: tcode=0x%x, hlen=%d, dlen=%d\n",
1674 pkt->fp_tcode, pkt->fp_hlen, pkt->fp_dlen));
1675 pkt->fp_iov[0].iov_base = p;
1676 pkt->fp_iov[0].iov_len = len;
1677 pkt->fp_uio.uio_iovcnt = 0;
1678 pkt->fp_uio.uio_resid = len;
1679 return 1;
1680 }
1681
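/*
 * Register (handler != NULL) or remove (handler == NULL) a packet
 * handler.  For IEEE1394_TCODE_STREAM_DATA the handler is bound to an
 * isochronous/asynchronous-stream receive context selected by channel
 * (key1) and tag bitmap (key2); for request/response tcodes it is bound
 * to the 48-bit address range starting at key1:key2 with length key3.
 */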
1682 static int
1683 fwohci_handler_set(struct fwohci_softc *sc,
1684 int tcode, u_int32_t key1, u_int32_t key2, u_int32_t key3,
1685 int (*handler)(struct fwohci_softc *, void *, struct fwohci_pkt *),
1686 void *arg)
1687 {
1688 struct fwohci_ctx *fc;
1689 struct fwohci_handler *fh;
1690 u_int64_t addr, naddr;
1691 u_int32_t off;
1692 int i, j;
1693
1694 if (tcode == IEEE1394_TCODE_STREAM_DATA &&
1695 (((key1 & OHCI_ASYNC_STREAM) && sc->sc_ctx_as != NULL)
1696 || (key1 & OHCI_ASYNC_STREAM) == 0)) {
1697 int isasync = key1 & OHCI_ASYNC_STREAM;
1698
1699 key1 = key1 & IEEE1394_ISO_CHANNEL_ANY ?
1700 IEEE1394_ISO_CHANNEL_ANY : (key1 & IEEE1394_ISOCH_MASK);
1701 if (key1 & IEEE1394_ISO_CHANNEL_ANY) {
1702 printf("%s: key changed to %x\n",
1703 sc->sc_sc1394.sc1394_dev.dv_xname, key1);
1704 }
1705 j = sc->sc_isoctx;
1706 fh = NULL;
1707
1708 for (i = 0; i < sc->sc_isoctx; i++) {
1709 if ((fc = sc->sc_ctx_as[i]) == NULL) {
1710 if (j == sc->sc_isoctx)
1711 j = i;
1712 continue;
1713 }
1714 fh = LIST_FIRST(&fc->fc_handler);
1715 if (fh->fh_tcode == tcode &&
1716 fh->fh_key1 == key1 && fh->fh_key2 == key2)
1717 break;
1718 fh = NULL;
1719 }
1720 if (fh == NULL) {
1721 if (handler == NULL)
1722 return 0;
1723 if (j == sc->sc_isoctx) {
1724 DPRINTF(("fwohci_handler_set: no more free "
1725 "context\n"));
1726 return ENOMEM;
1727 }
1728 if ((fc = sc->sc_ctx_as[j]) == NULL) {
1729 fwohci_ctx_alloc(sc, &fc, OHCI_BUF_IR_CNT, j,
1730 isasync ? FWOHCI_CTX_ISO_SINGLE :
1731 FWOHCI_CTX_ISO_MULTI);
1732 sc->sc_ctx_as[j] = fc;
1733 }
1734 }
1735 #ifdef FW_DEBUG
1736 if (fh == NULL && handler != NULL) {
1737 printf("use ir context %d\n", j);
1738 } else if (fh != NULL && handler == NULL) {
1739 printf("remove ir context %d\n", i);
1740 }
1741 #endif
1742 } else {
1743 switch (tcode) {
1744 case IEEE1394_TCODE_WRITE_REQ_QUAD:
1745 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
1746 case IEEE1394_TCODE_READ_REQ_QUAD:
1747 case IEEE1394_TCODE_READ_REQ_BLOCK:
1748 case IEEE1394_TCODE_LOCK_REQ:
1749 fc = sc->sc_ctx_arrq;
1750 break;
1751 case IEEE1394_TCODE_WRITE_RESP:
1752 case IEEE1394_TCODE_READ_RESP_QUAD:
1753 case IEEE1394_TCODE_READ_RESP_BLOCK:
1754 case IEEE1394_TCODE_LOCK_RESP:
1755 fc = sc->sc_ctx_arrs;
1756 break;
1757 default:
1758 return EIO;
1759 }
1760 naddr = ((u_int64_t)key1 << 32) + key2;
1761
1762 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
1763 fh = LIST_NEXT(fh, fh_list)) {
1764 if (fh->fh_tcode == tcode) {
1765 if (fh->fh_key1 == key1 &&
1766 fh->fh_key2 == key2 && fh->fh_key3 == key3)
1767 break;
1768 /* Make sure it's not within a current range. */
1769 addr = ((u_int64_t)fh->fh_key1 << 32) +
1770 fh->fh_key2;
1771 off = fh->fh_key3;
1772 if (key3 &&
1773 (((naddr >= addr) &&
1774 (naddr < (addr + off))) ||
1775 (((naddr + key3) > addr) &&
1776 ((naddr + key3) <= (addr + off))) ||
1777 ((addr > naddr) &&
1778 (addr < (naddr + key3)))))
1779 if (handler)
1780 return EEXIST;
1781 }
1782 }
1783 }
1784 if (handler == NULL) {
1785 if (fh != NULL) {
1786 LIST_REMOVE(fh, fh_list);
1787 free(fh, M_DEVBUF);
1788 }
1789 if (tcode == IEEE1394_TCODE_STREAM_DATA) {
1790 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
1791 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1792 sc->sc_ctx_as[fc->fc_ctx] = NULL;
1793 fwohci_ctx_free(sc, fc);
1794 }
1795 return 0;
1796 }
1797 if (fh == NULL) {
1798 fh = malloc(sizeof(*fh), M_DEVBUF, M_WAITOK);
1799 LIST_INSERT_HEAD(&fc->fc_handler, fh, fh_list);
1800 }
1801 fh->fh_tcode = tcode;
1802 fh->fh_key1 = key1;
1803 fh->fh_key2 = key2;
1804 fh->fh_key3 = key3;
1805 fh->fh_handler = handler;
1806 fh->fh_handarg = arg;
1807 DPRINTFN(1, ("fwohci_handler_set: ctx %d, tcode %x, key 0x%x, 0x%x, "
1808 "0x%x\n", fc->fc_ctx, tcode, key1, key2, key3));
1809
1810 if (tcode == IEEE1394_TCODE_STREAM_DATA) {
1811 fwohci_ctx_init(sc, fc);
1812 DPRINTFN(1, ("fwohci_handler_set: SYNC desc %ld\n",
1813 (long)(TAILQ_FIRST(&fc->fc_buf)->fb_desc - sc->sc_desc)));
1814 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
1815 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1816 }
1817 return 0;
1818 }
1819
1820 /*
1821 * static ieee1394_ir_tag_t
1822 * fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
1823 * int bufnum, int maxsize, int flags)
1824 *
  1825  * This function will return a non-NULL value if it succeeds.
  1826  * The return value is a pointer to the isochronous receive
  1827  * context.  This function will return NULL if it
  1828  * fails.
1829 */
1830 ieee1394_ir_tag_t
1831 fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
1832 int bufnum, int maxsize, int flags)
1833 {
1834 int i, openctx;
1835 struct fwohci_ir_ctx *irc;
1836 struct fwohci_softc *sc = (struct fwohci_softc *)dev;
1837 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
1838
1839 printf("%s: ir_ctx_set channel %d tagbm 0x%x maxsize %d bufnum %d\n",
1840 xname, channel, tagbm, maxsize, bufnum);
1841 /*
1842 	 * This loop finds the lowest-numbered vacant context and checks
1843 	 * that no other context already uses the requested channel.
1844 */
1845 openctx = sc->sc_isoctx;
1846 for (i = 0; i < sc->sc_isoctx; ++i) {
1847 if (sc->sc_ctx_ir[i] == NULL) {
1848 /*
1849 			 * Found a vacant context.  If it has the
1850 			 * smallest context number so far, remember it.
1851 */
1852 if (openctx == sc->sc_isoctx) {
1853 openctx = i;
1854 }
1855 } else {
1856 /*
1857 			 * This context is in use.  Check whether it
1858 			 * uses the same channel as ours.
1859 */
1860 if (sc->sc_ctx_ir[i]->irc_channel == channel) {
1861 /* Using same channel. */
1862 printf("%s: channel %d occupied by ctx%d\n",
1863 xname, channel, i);
1864 return NULL;
1865 }
1866 }
1867 }
1868
1869 /*
1870 * If there is a vacant context, allocate isochronous transmit
1871 * context for it.
1872 */
1873 if (openctx != sc->sc_isoctx) {
1874 		printf("%s: using ctx %d for iso receive\n", xname, openctx);
1875 if ((irc = fwohci_ir_ctx_construct(sc, openctx, channel,
1876 tagbm, bufnum, maxsize, flags)) == NULL) {
1877 return NULL;
1878 }
1879 #ifndef IR_CTX_OPENTEST
1880 sc->sc_ctx_ir[openctx] = irc;
1881 #else
1882 fwohci_ir_ctx_destruct(irc);
1883 irc = NULL;
1884 #endif
1885 } else {
1886 printf("%s: cannot find any vacant contexts\n", xname);
1887 irc = NULL;
1888 }
1889
1890 return (ieee1394_ir_tag_t)irc;
1891 }
1892
1893
1894 /*
1895  * int fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t ir)
1896  *
1897  *	This function returns 0 on success.  Otherwise it returns -1
1898  *	or EBUSY.
1899 */
1900 int
1901 fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t ir)
1902 {
1903 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)ir;
1904 struct fwohci_softc *sc = irc->irc_sc;
1905 int i;
1906
1907 if (sc->sc_ctx_ir[irc->irc_num] != irc) {
1908 printf("fwohci_ir_ctx_clear: irc differs %p %p\n",
1909 sc->sc_ctx_ir[irc->irc_num], irc);
1910 return -1;
1911 }
1912
1913 i = 0;
1914 while (irc->irc_status & IRC_STATUS_RUN) {
1915 tsleep((void *)irc, PWAIT|PCATCH, "IEEE1394 iso receive", 100);
1916 if (irc->irc_status & IRC_STATUS_RUN) {
1917 if (fwohci_ir_stop(irc) == 0) {
1918 irc->irc_status &= ~IRC_STATUS_RUN;
1919 }
1920
1921 }
1922 if (++i > 20) {
1923 u_int32_t reg
1924 = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1925 OHCI_SUBREG_ContextControlSet);
1926
1927 			printf("fwohci_ir_ctx_clear: "
1928 "Cannot stop iso receive engine\n");
1929 printf("%s: intr IR_CommandPtr 0x%08x "
1930 "ContextCtrl 0x%08x%s%s%s%s\n",
1931 sc->sc_sc1394.sc1394_dev.dv_xname,
1932 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1933 OHCI_SUBREG_CommandPtr),
1934 reg,
1935 reg & OHCI_CTXCTL_RUN ? " run" : "",
1936 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
1937 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
1938 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
1939
1940 return EBUSY;
1941 }
1942 }
1943
1944 	printf("fwohci_ir_ctx_clear: DMA engine stopped: got %d frames, max queuelen %d, pos %d\n",
1945 irc->irc_pktcount, irc->irc_maxqueuelen, irc->irc_maxqueuepos);
1946
1947 fwohci_ir_ctx_destruct(irc);
1948
1949 sc->sc_ctx_ir[irc->irc_num] = NULL;
1950
1951 return 0;
1952 }
1953
1954
1955
1956
1957
1958
1959
1960
1961 ieee1394_it_tag_t
1962 fwohci_it_set(struct ieee1394_softc *isc, int channel, int tagbm)
1963 {
1964 ieee1394_it_tag_t rv;
1965 int tag;
1966
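	/*
	 * Pick the lowest tag number whose bit is set in the tag bitmap;
	 * the transmit context is then created with a fixed maxsize of
	 * 488 bytes.
	 */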
1967 for (tag = 0; tagbm != 0 && (tagbm & 0x01) == 0; tagbm >>= 1, ++tag);
1968
1969 rv = fwohci_it_ctx_set((struct fwohci_softc *)isc, channel, tag, 488);
1970
1971 return rv;
1972 }
1973
1974 /*
1975 * static ieee1394_it_tag_t
1976 * fwohci_it_ctx_set(struct fwohci_softc *sc,
1977 * u_int32_t key1 (channel), u_int32_t key2 (tag), int maxsize)
1978 *
1979  *	This function returns a pointer to the isochronous transmit
1980  *	context on success.  It returns NULL if it fails (no free
1981  *	context, the channel is already in use, or the context cannot
1982  *	be constructed).
1983 */
1984 static ieee1394_it_tag_t
1985 fwohci_it_ctx_set(struct fwohci_softc *sc, int channel, int tag, int maxsize)
1986 {
1987 int i, openctx;
1988 struct fwohci_it_ctx *itc;
1989 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
1990 #ifdef TEST_CHAIN
1991 extern int fwohci_test_chain(struct fwohci_it_ctx *);
1992 #endif /* TEST_CHAIN */
1993 #ifdef TEST_WRITE
1994 extern void fwohci_test_write(struct fwohci_it_ctx *itc);
1995 #endif /* TEST_WRITE */
1996
1997 printf("%s: it_ctx_set channel %d tag %d maxsize %d\n",
1998 xname, channel, tag, maxsize);
1999
2000 /*
2001 	 * This loop finds the lowest-numbered vacant context and checks
2002 	 * that no other context already uses the requested channel.
2003 */
2004 openctx = sc->sc_itctx;
2005 for (i = 0; i < sc->sc_itctx; ++i) {
2006 if (sc->sc_ctx_it[i] == NULL) {
2007 /*
2008 			 * Found a vacant context.  If it has the
2009 			 * smallest context number so far, remember it.
2010 */
2011 if (openctx == sc->sc_itctx) {
2012 openctx = i;
2013 }
2014 } else {
2015 /*
2016 			 * This context is in use.  Check whether it
2017 			 * uses the same channel as ours.
2018 */
2019 if (sc->sc_ctx_it[i]->itc_channel == channel) {
2020 /* Using same channel. */
2021 printf("%s: channel %d occupied by ctx%d\n",
2022 xname, channel, i);
2023 return NULL;
2024 }
2025 }
2026 }
2027
2028 /*
2029 * If there is a vacant context, allocate isochronous transmit
2030 * context for it.
2031 */
2032 if (openctx != sc->sc_itctx) {
2033 		printf("%s: using ctx %d for iso transmit\n", xname, openctx);
2034 if ((itc = fwohci_it_ctx_construct(sc, openctx, channel,
2035 tag, maxsize)) == NULL) {
2036 return NULL;
2037 }
2038 sc->sc_ctx_it[openctx] = itc;
2039
2040 #ifdef TEST_CHAIN
2041 fwohci_test_chain(itc);
2042 #endif /* TEST_CHAIN */
2043 #ifdef TEST_WRITE
2044 fwohci_test_write(itc);
2045 itc = NULL;
2046 #endif /* TEST_WRITE */
2047
2048 } else {
2049 printf("%s: cannot find any vacant contexts\n", xname);
2050 itc = NULL;
2051 }
2052
2053 return (ieee1394_it_tag_t)itc;
2054 }
2055
2056
2057 /*
2058 * int fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2059 *
2060  *	This function returns 0 on success.  Otherwise it returns -1
2061  *	or EBUSY.
2062 */
2063 int
2064 fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2065 {
2066 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
2067 struct fwohci_softc *sc = itc->itc_sc;
2068 int i;
2069
2070 if (sc->sc_ctx_it[itc->itc_num] != itc) {
2071 printf("fwohci_it_ctx_clear: itc differs %p %p\n",
2072 sc->sc_ctx_it[itc->itc_num], itc);
2073 return -1;
2074 }
2075
2076 fwohci_it_ctx_flush(it);
2077
2078 i = 0;
2079 while (itc->itc_flags & ITC_FLAGS_RUN) {
2080 tsleep((void *)itc, PWAIT|PCATCH, "IEEE1394 iso transmit", 100);
2081 if (itc->itc_flags & ITC_FLAGS_RUN) {
2082 u_int32_t reg;
2083
2084 reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2085 OHCI_SUBREG_ContextControlSet);
2086
2087 if ((reg & OHCI_CTXCTL_WAKE) == 0) {
2088 itc->itc_flags &= ~ITC_FLAGS_RUN;
2089 				printf("fwohci_it_ctx_clear: "
2090 "DMA engine stopped without intr\n");
2091 }
2092 printf("%s: %d intr IT_CommandPtr 0x%08x "
2093 "ContextCtrl 0x%08x%s%s%s%s\n",
2094 sc->sc_sc1394.sc1394_dev.dv_xname, i,
2095 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2096 OHCI_SUBREG_CommandPtr),
2097 reg,
2098 reg & OHCI_CTXCTL_RUN ? " run" : "",
2099 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
2100 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
2101 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
2102
2103
2104 }
2105 if (++i > 20) {
2106 u_int32_t reg
2107 = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2108 OHCI_SUBREG_ContextControlSet);
2109
2110 			printf("fwohci_it_ctx_clear: "
2111 "Cannot stop iso transmit engine\n");
2112 printf("%s: intr IT_CommandPtr 0x%08x "
2113 "ContextCtrl 0x%08x%s%s%s%s\n",
2114 sc->sc_sc1394.sc1394_dev.dv_xname,
2115 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2116 OHCI_SUBREG_CommandPtr),
2117 reg,
2118 reg & OHCI_CTXCTL_RUN ? " run" : "",
2119 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
2120 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
2121 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
2122
2123 return EBUSY;
2124 }
2125 }
2126
2127 printf("fwohci_it_ctx_clear: DMA engine is stopped.\n");
2128
2129 fwohci_it_ctx_destruct(itc);
2130
2131 sc->sc_ctx_it[itc->itc_num] = NULL;
2132
2133
2134 return 0;
2135 }
2136
2137
2138
2139
2140
2141
2142 /*
2143  * Asynchronous Receive Request input frontend.
2144 */
2145 static void
2146 fwohci_arrq_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2147 {
2148 int rcode;
2149 u_int16_t len;
2150 u_int32_t key1, key2, off;
2151 u_int64_t addr, naddr;
2152 struct fwohci_handler *fh;
2153 struct fwohci_pkt pkt, res;
2154
2155 /*
2156 	 * Keep draining packets while they remain in the buffer; otherwise a
2157 	 * pending packet would not be handled until the next receive interrupt.
2158 */
2159 while (fwohci_buf_input(sc, fc, &pkt)) {
2160 if (pkt.fp_tcode == OHCI_TCODE_PHY) {
2161 fwohci_phy_input(sc, &pkt);
2162 continue;
2163 }
2164 key1 = pkt.fp_hdr[1] & 0xffff;
2165 key2 = pkt.fp_hdr[2];
2166 if ((pkt.fp_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) ||
2167 (pkt.fp_tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) {
2168 len = (pkt.fp_hdr[3] & 0xffff0000) >> 16;
2169 naddr = ((u_int64_t)key1 << 32) + key2;
2170 } else
2171 len = 0;
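		/*
		 * A request matches a handler either exactly (key1/key2
		 * equal the 48-bit destination offset), or, for block
		 * requests, when the whole [naddr, naddr + len) range lies
		 * within a handler registered over an address range
		 * (non-zero fh_key3).
		 */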
2172 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2173 fh = LIST_NEXT(fh, fh_list)) {
2174 if (pkt.fp_tcode == fh->fh_tcode) {
2175 /* Assume length check happens in handler */
2176 if (key1 == fh->fh_key1 &&
2177 key2 == fh->fh_key2) {
2178 rcode = (*fh->fh_handler)(sc,
2179 fh->fh_handarg, &pkt);
2180 break;
2181 }
2182 addr = ((u_int64_t)fh->fh_key1 << 32) +
2183 fh->fh_key2;
2184 off = fh->fh_key3;
2185 /* Check for a range qualifier */
2186 if (len &&
2187 ((naddr >= addr) && (naddr < (addr + off))
2188 && (naddr + len <= (addr + off)))) {
2189 rcode = (*fh->fh_handler)(sc,
2190 fh->fh_handarg, &pkt);
2191 break;
2192 }
2193 }
2194 }
2195 if (fh == NULL) {
2196 rcode = IEEE1394_RCODE_ADDRESS_ERROR;
2197 DPRINTFN(1, ("fwohci_arrq_input: no listener: tcode "
2198 "0x%x, addr=0x%04x %08x\n", pkt.fp_tcode, key1,
2199 key2));
2200 DPRINTFN(2, ("fwohci_arrq_input: no listener: hdr[0]: "
2201 "0x%08x, hdr[1]: 0x%08x, hdr[2]: 0x%08x, hdr[3]: "
2202 "0x%08x\n", pkt.fp_hdr[0], pkt.fp_hdr[1],
2203 pkt.fp_hdr[2], pkt.fp_hdr[3]));
2204 }
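		/*
		 * Only requests that the link acknowledged with ack_pending
		 * (i.e. split transactions) need an explicit response
		 * packet; unified transactions were already completed by the
		 * ack itself.
		 */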
2205 if (((*pkt.fp_trail & 0x001f0000) >> 16) !=
2206 OHCI_CTXCTL_EVENT_ACK_PENDING)
2207 continue;
2208 if (rcode != -1) {
2209 memset(&res, 0, sizeof(res));
2210 res.fp_uio.uio_rw = UIO_WRITE;
2211 res.fp_uio.uio_segflg = UIO_SYSSPACE;
2212 fwohci_atrs_output(sc, rcode, &pkt, &res);
2213 }
2214 }
2215 fwohci_buf_next(sc, fc);
2216 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2217 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2218 }
2219
2220
2221 /*
2222 * Asynchronous Receive Response input frontend.
2223 */
2224 static void
2225 fwohci_arrs_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2226 {
2227 struct fwohci_pkt pkt;
2228 struct fwohci_handler *fh;
2229 u_int16_t srcid;
2230 int rcode, tlabel;
2231
2232 while (fwohci_buf_input(sc, fc, &pkt)) {
2233 srcid = pkt.fp_hdr[1] >> 16;
2234 rcode = (pkt.fp_hdr[1] & 0x0000f000) >> 12;
2235 tlabel = (pkt.fp_hdr[0] & 0x0000fc00) >> 10;
2236 DPRINTFN(1, ("fwohci_arrs_input: tcode 0x%x, from 0x%04x,"
2237 " tlabel 0x%x, rcode 0x%x, hlen %d, dlen %d\n",
2238 pkt.fp_tcode, srcid, tlabel, rcode, pkt.fp_hlen,
2239 pkt.fp_dlen));
2240 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2241 fh = LIST_NEXT(fh, fh_list)) {
2242 if (pkt.fp_tcode == fh->fh_tcode &&
2243 (srcid & OHCI_NodeId_NodeNumber) == fh->fh_key1 &&
2244 tlabel == fh->fh_key2) {
2245 (*fh->fh_handler)(sc, fh->fh_handarg, &pkt);
2246 LIST_REMOVE(fh, fh_list);
2247 free(fh, M_DEVBUF);
2248 break;
2249 }
2250 }
2251 if (fh == NULL)
2252 			DPRINTFN(1, ("fwohci_arrs_input: no listener\n"));
2253 }
2254 fwohci_buf_next(sc, fc);
2255 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2256 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2257 }
2258
2259 /*
2260  * Isochronous Receive/Asynchronous Stream input frontend.
2261 */
2262 static void
2263 fwohci_as_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2264 {
2265 int rcode, chan, tag;
2266 struct iovec *iov;
2267 struct fwohci_handler *fh;
2268 struct fwohci_pkt pkt;
2269
2270 #if DOUBLEBUF
2271 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
2272 struct fwohci_buf *fb;
2273 int i;
2274 u_int32_t reg;
2275
2276 /* stop dma engine before read buffer */
2277 reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx,
2278 OHCI_SUBREG_ContextControlClear);
2279 DPRINTFN(5, ("ir_input %08x =>", reg));
2280 if (reg & OHCI_CTXCTL_RUN) {
2281 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2282 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2283 }
2284 DPRINTFN(5, (" %08x\n", OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlClear)));
2285
2286 i = 0;
2287 while ((reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlSet)) & OHCI_CTXCTL_ACTIVE) {
2288 delay(10);
2289 if (++i > 10000) {
2290 printf("cannot stop dma engine 0x%08x\n", reg);
2291 return;
2292 }
2293 }
2294
2295 /* rotate dma buffer */
2296 fb = TAILQ_FIRST(&fc->fc_buf2);
2297 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx, OHCI_SUBREG_CommandPtr,
2298 fb->fb_daddr | 1);
2299 /* start dma engine */
2300 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2301 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
2302 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear,
2303 (1 << fc->fc_ctx));
2304 }
2305 #endif
2306
2307 while (fwohci_buf_input_ppb(sc, fc, &pkt)) {
2308 chan = (pkt.fp_hdr[0] & 0x00003f00) >> 8;
2309 tag = (pkt.fp_hdr[0] & 0x0000c000) >> 14;
2310 DPRINTFN(1, ("fwohci_as_input: hdr 0x%08x, tcode 0x%0x, hlen %d"
2311 ", dlen %d\n", pkt.fp_hdr[0], pkt.fp_tcode, pkt.fp_hlen,
2312 pkt.fp_dlen));
2313 if (tag == IEEE1394_TAG_GASP &&
2314 fc->fc_type == FWOHCI_CTX_ISO_SINGLE) {
2315 /*
2316 			 * A packet with tag=3 is in GASP format.
2317 			 * Move the GASP header into the header part.
2318 */
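			/*
			 * Per IEEE 1394, the GASP header is two quadlets
			 * (source_ID and specifier_ID high bits, then
			 * specifier_ID low bits and version), hence the
			 * 8-byte minimum payload and the 8-byte shift below.
			 */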
2319 if (pkt.fp_dlen < 8)
2320 continue;
2321 iov = pkt.fp_iov;
2322 /* assuming pkt per buffer mode */
2323 pkt.fp_hdr[1] = ntohl(((u_int32_t *)iov->iov_base)[0]);
2324 pkt.fp_hdr[2] = ntohl(((u_int32_t *)iov->iov_base)[1]);
2325 iov->iov_base = (caddr_t)iov->iov_base + 8;
2326 iov->iov_len -= 8;
2327 pkt.fp_hlen += 8;
2328 pkt.fp_dlen -= 8;
2329 }
2330 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2331 fh = LIST_NEXT(fh, fh_list)) {
2332 if (pkt.fp_tcode == fh->fh_tcode &&
2333 (chan == fh->fh_key1 ||
2334 fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) &&
2335 ((1 << tag) & fh->fh_key2) != 0) {
2336 rcode = (*fh->fh_handler)(sc, fh->fh_handarg,
2337 &pkt);
2338 break;
2339 }
2340 }
2341 #ifdef FW_DEBUG
2342 if (fh == NULL) {
2343 DPRINTFN(1, ("fwohci_as_input: no handler\n"));
2344 } else {
2345 DPRINTFN(1, ("fwohci_as_input: rcode %d\n", rcode));
2346 }
2347 #endif
2348 }
2349 fwohci_buf_next(sc, fc);
2350
2351 if (fc->fc_type == FWOHCI_CTX_ISO_SINGLE) {
2352 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2353 OHCI_SUBREG_ContextControlSet,
2354 OHCI_CTXCTL_WAKE);
2355 }
2356 }
2357
2358 /*
2359 * Asynchronous Transmit common routine.
2360 */
2361 static int
2362 fwohci_at_output(struct fwohci_softc *sc, struct fwohci_ctx *fc,
2363 struct fwohci_pkt *pkt)
2364 {
2365 struct fwohci_buf *fb;
2366 struct fwohci_desc *fd;
2367 struct mbuf *m, *m0;
2368 int i, ndesc, error, off, len;
2369 u_int32_t val;
2370 #ifdef FW_DEBUG
2371 struct iovec *iov;
2372 int tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10;
2373 #endif
2374
2375 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == IEEE1394_BCAST_PHY_ID)
2376 		/* We can't send anything during the self-ID phase */
2377 return EAGAIN;
2378
2379 #ifdef FW_DEBUG
2380 DPRINTFN(1, ("fwohci_at_output: tcode 0x%x, tlabel 0x%x hlen %d, "
2381 "dlen %d", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen));
2382 for (i = 0; i < pkt->fp_hlen/4; i++)
2383 DPRINTFN(2, ("%s%08x", i?" ":"\n ", pkt->fp_hdr[i]));
2384 DPRINTFN(2, ("$"));
2385 for (ndesc = 0, iov = pkt->fp_iov;
2386 ndesc < pkt->fp_uio.uio_iovcnt; ndesc++, iov++) {
2387 for (i = 0; i < iov->iov_len; i++)
2388 DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ",
2389 ((u_int8_t *)iov->iov_base)[i]));
2390 DPRINTFN(2, ("$"));
2391 }
2392 DPRINTFN(1, ("\n"));
2393 #endif
2394
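	/*
	 * Count descriptors: the OUTPUT_MORE-Immediate descriptor and the
	 * slot holding the immediate header take two, plus one per data
	 * segment.  If the mbuf chain would need more than OHCI_DESC_MAX
	 * descriptors, coalesce it into cluster mbufs first.
	 */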
2395 if ((m = pkt->fp_m) != NULL) {
2396 for (ndesc = 2; m != NULL; m = m->m_next)
2397 ndesc++;
2398 if (ndesc > OHCI_DESC_MAX) {
2399 m0 = NULL;
2400 ndesc = 2;
2401 for (off = 0; off < pkt->fp_dlen; off += len) {
2402 if (m0 == NULL) {
2403 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2404 if (m0 != NULL)
2405 M_COPY_PKTHDR(m0, pkt->fp_m);
2406 m = m0;
2407 } else {
2408 MGET(m->m_next, M_DONTWAIT, MT_DATA);
2409 m = m->m_next;
2410 }
2411 if (m != NULL)
2412 MCLGET(m, M_DONTWAIT);
2413 if (m == NULL || (m->m_flags & M_EXT) == 0) {
2414 m_freem(m0);
2415 return ENOMEM;
2416 }
2417 len = pkt->fp_dlen - off;
2418 if (len > m->m_ext.ext_size)
2419 len = m->m_ext.ext_size;
2420 m_copydata(pkt->fp_m, off, len,
2421 mtod(m, caddr_t));
2422 m->m_len = len;
2423 ndesc++;
2424 }
2425 m_freem(pkt->fp_m);
2426 pkt->fp_m = m0;
2427 }
2428 } else
2429 ndesc = 2 + pkt->fp_uio.uio_iovcnt;
2430
2431 if (ndesc > OHCI_DESC_MAX)
2432 return ENOBUFS;
2433
2434 if (fc->fc_bufcnt > 50) /*XXX*/
2435 return ENOBUFS;
2436 fb = malloc(sizeof(*fb), M_DEVBUF, M_WAITOK);
2437 if (ndesc > 2) {
2438 if ((error = bus_dmamap_create(sc->sc_dmat, pkt->fp_dlen,
2439 OHCI_DESC_MAX - 2, pkt->fp_dlen, 0, BUS_DMA_WAITOK,
2440 &fb->fb_dmamap)) != 0) {
2441 			/* No descriptors have been allocated yet. */
2442 free(fb, M_DEVBUF);
2443 return error;
2444 }
2445
2446 if (pkt->fp_m != NULL)
2447 error = bus_dmamap_load_mbuf(sc->sc_dmat, fb->fb_dmamap,
2448 pkt->fp_m, BUS_DMA_WAITOK);
2449 else
2450 error = bus_dmamap_load_uio(sc->sc_dmat, fb->fb_dmamap,
2451 &pkt->fp_uio, BUS_DMA_WAITOK);
2452 if (error != 0) {
2453 DPRINTFN(1, ("Can't load DMA map: %d\n", error));
2454 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
2456 free(fb, M_DEVBUF);
2457 return error;
2458 }
2459 ndesc = fb->fb_dmamap->dm_nsegs + 2;
2460
2461 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0, pkt->fp_dlen,
2462 BUS_DMASYNC_PREWRITE);
2463 }
2464
2465 fb->fb_nseg = ndesc;
2466 fb->fb_desc = fwohci_desc_get(sc, ndesc);
2467 if (fb->fb_desc == NULL) {
2468 free(fb, M_DEVBUF);
2469 return ENOBUFS;
2470 }
2471 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
2472 ((caddr_t)fb->fb_desc - (caddr_t)sc->sc_desc);
2473 fb->fb_m = pkt->fp_m;
2474 fb->fb_callback = pkt->fp_callback;
2475 fb->fb_statuscb = pkt->fp_statuscb;
2476 fb->fb_statusarg = pkt->fp_statusarg;
2477
2478 fd = fb->fb_desc;
2479 fd->fd_flags = OHCI_DESC_IMMED;
2480 fd->fd_reqcount = pkt->fp_hlen;
2481 fd->fd_data = 0;
2482 fd->fd_branch = 0;
2483 fd->fd_status = 0;
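	/*
	 * For responses, stamp the descriptor so the controller gives up on
	 * the packet after roughly three seconds: per OHCI, the timeStamp
	 * holds cycleCount in its low 13 bits and the low three bits of
	 * cycleSeconds above that, taken here from the current cycle timer.
	 */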
2484 if (fc->fc_ctx == OHCI_CTX_ASYNC_TX_RESPONSE) {
2485 i = 3; /* XXX: 3 sec */
2486 val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
2487 fd->fd_timestamp = ((val >> 12) & 0x1fff) |
2488 ((((val >> 25) + i) & 0x7) << 13);
2489 } else
2490 fd->fd_timestamp = 0;
2491 memcpy(fd + 1, pkt->fp_hdr, pkt->fp_hlen);
2492 for (i = 0; i < ndesc - 2; i++) {
2493 fd = fb->fb_desc + 2 + i;
2494 fd->fd_flags = 0;
2495 fd->fd_reqcount = fb->fb_dmamap->dm_segs[i].ds_len;
2496 fd->fd_data = fb->fb_dmamap->dm_segs[i].ds_addr;
2497 fd->fd_branch = 0;
2498 fd->fd_status = 0;
2499 fd->fd_timestamp = 0;
2500 }
2501 fd->fd_flags |= OHCI_DESC_LAST | OHCI_DESC_BRANCH;
2502 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
2503
2504 #ifdef FW_DEBUG
2505 DPRINTFN(1, ("fwohci_at_output: desc %ld",
2506 (long)(fb->fb_desc - sc->sc_desc)));
2507 for (i = 0; i < ndesc * 4; i++)
2508 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ",
2509 ((u_int32_t *)fb->fb_desc)[i]));
2510 DPRINTFN(1, ("\n"));
2511 #endif
2512
2513 val = OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx,
2514 OHCI_SUBREG_ContextControlClear);
2515
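	/*
	 * If the context is already running, append the new block by
	 * patching the previous tail descriptor's branch word (address |
	 * Z count) and wake the context so it re-reads the branch.
	 * Otherwise (or if there is no branch word left to patch), program
	 * CommandPtr with the new block and (re)start the context.
	 */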
2516 if (val & OHCI_CTXCTL_RUN) {
2517 if (fc->fc_branch == NULL) {
2518 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2519 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2520 goto run;
2521 }
2522 *fc->fc_branch = fb->fb_daddr | ndesc;
2523 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2524 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2525 } else {
2526 run:
2527 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2528 OHCI_SUBREG_CommandPtr, fb->fb_daddr | ndesc);
2529 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2530 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
2531 }
2532 fc->fc_branch = &fd->fd_branch;
2533
2534 fc->fc_bufcnt++;
2535 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
2536 pkt->fp_m = NULL;
2537 return 0;
2538 }
2539
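/*
 * Reap asynchronous transmit buffers whose descriptors have completed (or
 * all of them if "force" is set): report status through the per-packet
 * status callback (with fp_tcode = -1), release the descriptors and DMA
 * maps, and hand the mbuf back via the completion callback or free it.
 */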
2540 static void
2541 fwohci_at_done(struct fwohci_softc *sc, struct fwohci_ctx *fc, int force)
2542 {
2543 struct fwohci_buf *fb;
2544 struct fwohci_desc *fd;
2545 struct fwohci_pkt pkt;
2546 int i;
2547
2548 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
2549 fd = fb->fb_desc;
2550 #ifdef FW_DEBUG
2551 DPRINTFN(1, ("fwohci_at_done: %sdesc %ld (%d)",
2552 force ? "force " : "", (long)(fd - sc->sc_desc),
2553 fb->fb_nseg));
2554 for (i = 0; i < fb->fb_nseg * 4; i++)
2555 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ",
2556 ((u_int32_t *)fd)[i]));
2557 DPRINTFN(1, ("\n"));
2558 #endif
2559 if (fb->fb_nseg > 2)
2560 fd += fb->fb_nseg - 1;
2561 if (!force && !(fd->fd_status & OHCI_CTXCTL_ACTIVE))
2562 break;
2563 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
2564 if (fc->fc_branch == &fd->fd_branch) {
2565 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2566 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2567 fc->fc_branch = NULL;
2568 for (i = 0; i < OHCI_LOOP; i++) {
2569 if (!(OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx,
2570 OHCI_SUBREG_ContextControlClear) &
2571 OHCI_CTXCTL_ACTIVE))
2572 break;
2573 DELAY(10);
2574 }
2575 }
2576
2577 if (fb->fb_statuscb) {
2578 memset(&pkt, 0, sizeof(pkt));
2579 pkt.fp_status = fd->fd_status;
2580 memcpy(pkt.fp_hdr, fd + 1, sizeof(pkt.fp_hdr[0]));
2581
2582 /* Indicate this is just returning the status bits. */
2583 pkt.fp_tcode = -1;
2584 (*fb->fb_statuscb)(sc, fb->fb_statusarg, &pkt);
2585 fb->fb_statuscb = NULL;
2586 fb->fb_statusarg = NULL;
2587 }
2588 fwohci_desc_put(sc, fb->fb_desc, fb->fb_nseg);
2589 if (fb->fb_nseg > 2)
2590 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
2591 fc->fc_bufcnt--;
2592 if (fb->fb_callback) {
2593 (*fb->fb_callback)(sc->sc_sc1394.sc1394_if, fb->fb_m);
2594 fb->fb_callback = NULL;
2595 } else if (fb->fb_m != NULL)
2596 m_freem(fb->fb_m);
2597 free(fb, M_DEVBUF);
2598 }
2599 }
2600
2601 /*
2602  * Asynchronous Transmit Response -- sent in response to a request packet.
2603 */
2604 static void
2605 fwohci_atrs_output(struct fwohci_softc *sc, int rcode, struct fwohci_pkt *req,
2606 struct fwohci_pkt *res)
2607 {
2608
2609 if (((*req->fp_trail & 0x001f0000) >> 16) !=
2610 OHCI_CTXCTL_EVENT_ACK_PENDING)
2611 return;
2612
2613 res->fp_hdr[0] = (req->fp_hdr[0] & 0x0000fc00) | 0x00000100;
2614 res->fp_hdr[1] = (req->fp_hdr[1] & 0xffff0000) | (rcode << 12);
2615 switch (req->fp_tcode) {
2616 case IEEE1394_TCODE_WRITE_REQ_QUAD:
2617 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
2618 res->fp_tcode = IEEE1394_TCODE_WRITE_RESP;
2619 res->fp_hlen = 12;
2620 break;
2621 case IEEE1394_TCODE_READ_REQ_QUAD:
2622 res->fp_tcode = IEEE1394_TCODE_READ_RESP_QUAD;
2623 res->fp_hlen = 16;
2624 res->fp_dlen = 0;
2625 if (res->fp_uio.uio_iovcnt == 1 && res->fp_iov[0].iov_len == 4)
2626 res->fp_hdr[3] =
2627 *(u_int32_t *)res->fp_iov[0].iov_base;
2628 res->fp_uio.uio_iovcnt = 0;
2629 break;
2630 case IEEE1394_TCODE_READ_REQ_BLOCK:
2631 case IEEE1394_TCODE_LOCK_REQ:
2632 if (req->fp_tcode == IEEE1394_TCODE_LOCK_REQ)
2633 res->fp_tcode = IEEE1394_TCODE_LOCK_RESP;
2634 else
2635 res->fp_tcode = IEEE1394_TCODE_READ_RESP_BLOCK;
2636 res->fp_hlen = 16;
2637 res->fp_dlen = res->fp_uio.uio_resid;
2638 res->fp_hdr[3] = res->fp_dlen << 16;
2639 break;
2640 }
2641 res->fp_hdr[0] |= (res->fp_tcode << 4);
2642 fwohci_at_output(sc, sc->sc_ctx_atrs, res);
2643 }
2644
2645 /*
2646 * APPLICATION LAYER SERVICES
2647 */
2648
2649 /*
2650 * Retrieve Global UID from GUID ROM
2651 */
2652 static int
2653 fwohci_guidrom_init(struct fwohci_softc *sc)
2654 {
2655 int i, n, off;
2656 u_int32_t val1, val2;
2657
2658 	/* Extract the Global UID. */
2660 val1 = OHCI_CSR_READ(sc, OHCI_REG_GUIDHi);
2661 val2 = OHCI_CSR_READ(sc, OHCI_REG_GUIDLo);
2662
2663 if (val1 != 0 || val2 != 0) {
2664 sc->sc_sc1394.sc1394_guid[0] = (val1 >> 24) & 0xff;
2665 sc->sc_sc1394.sc1394_guid[1] = (val1 >> 16) & 0xff;
2666 sc->sc_sc1394.sc1394_guid[2] = (val1 >> 8) & 0xff;
2667 sc->sc_sc1394.sc1394_guid[3] = (val1 >> 0) & 0xff;
2668 sc->sc_sc1394.sc1394_guid[4] = (val2 >> 24) & 0xff;
2669 sc->sc_sc1394.sc1394_guid[5] = (val2 >> 16) & 0xff;
2670 sc->sc_sc1394.sc1394_guid[6] = (val2 >> 8) & 0xff;
2671 sc->sc_sc1394.sc1394_guid[7] = (val2 >> 0) & 0xff;
2672 } else {
2673 val1 = OHCI_CSR_READ(sc, OHCI_REG_Version);
2674 if ((val1 & OHCI_Version_GUID_ROM) == 0)
2675 return -1;
2676 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom, OHCI_Guid_AddrReset);
2677 for (i = 0; i < OHCI_LOOP; i++) {
2678 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom);
2679 if (!(val1 & OHCI_Guid_AddrReset))
2680 break;
2681 DELAY(10);
2682 }
2683 off = OHCI_BITVAL(val1, OHCI_Guid_MiniROM) + 4;
2684 val2 = 0;
2685 for (n = 0; n < off + sizeof(sc->sc_sc1394.sc1394_guid); n++) {
2686 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom,
2687 OHCI_Guid_RdStart);
2688 for (i = 0; i < OHCI_LOOP; i++) {
2689 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom);
2690 if (!(val1 & OHCI_Guid_RdStart))
2691 break;
2692 DELAY(10);
2693 }
2694 if (n < off)
2695 continue;
2696 val1 = OHCI_BITVAL(val1, OHCI_Guid_RdData);
2697 sc->sc_sc1394.sc1394_guid[n - off] = val1;
2698 val2 |= val1;
2699 }
2700 if (val2 == 0)
2701 return -1;
2702 }
2703 return 0;
2704 }
2705
2706 /*
2707 * Initialization for Configuration ROM (no DMA context)
2708 */
2709
2710 #define CFR_MAXUNIT 20
2711
2712 struct configromctx {
2713 u_int32_t *ptr;
2714 int curunit;
2715 struct {
2716 u_int32_t *start;
2717 int length;
2718 u_int32_t *refer;
2719 int refunit;
2720 } unit[CFR_MAXUNIT];
2721 };
2722
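/*
 * The Config ROM image is built as a set of "units" (bus info block, root
 * directory, textual leaves, unit directories).  CFR_PUT_REFER records a
 * directory entry whose offset field must be patched later; CFR_START_UNIT
 * backpatches that offset once the referred-to unit begins and recomputes
 * the referring unit's CRC; CFR_END_UNIT stores the unit's length and
 * CRC-16 in its first quadlet.
 */
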
2723 #define CFR_PUT_DATA4(cfr, d1, d2, d3, d4) \
2724 (*(cfr)->ptr++ = (((d1)<<24) | ((d2)<<16) | ((d3)<<8) | (d4)))
2725
2726 #define CFR_PUT_DATA1(cfr, d) (*(cfr)->ptr++ = (d))
2727
2728 #define CFR_PUT_VALUE(cfr, key, d) (*(cfr)->ptr++ = ((key)<<24) | (d))
2729
2730 #define CFR_PUT_CRC(cfr, n) \
2731 (*(cfr)->unit[n].start = ((cfr)->unit[n].length << 16) | \
2732 fwohci_crc16((cfr)->unit[n].start + 1, (cfr)->unit[n].length))
2733
2734 #define CFR_START_UNIT(cfr, n) \
2735 do { \
2736 if ((cfr)->unit[n].refer != NULL) { \
2737 *(cfr)->unit[n].refer |= \
2738 (cfr)->ptr - (cfr)->unit[n].refer; \
2739 CFR_PUT_CRC(cfr, (cfr)->unit[n].refunit); \
2740 } \
2741 (cfr)->curunit = (n); \
2742 (cfr)->unit[n].start = (cfr)->ptr++; \
2743 } while (0 /* CONSTCOND */)
2744
2745 #define CFR_PUT_REFER(cfr, key, n) \
2746 do { \
2747 (cfr)->unit[n].refer = (cfr)->ptr; \
2748 (cfr)->unit[n].refunit = (cfr)->curunit; \
2749 *(cfr)->ptr++ = (key) << 24; \
2750 } while (0 /* CONSTCOND */)
2751
2752 #define CFR_END_UNIT(cfr) \
2753 do { \
2754 (cfr)->unit[(cfr)->curunit].length = (cfr)->ptr - \
2755 ((cfr)->unit[(cfr)->curunit].start + 1); \
2756 CFR_PUT_CRC(cfr, (cfr)->curunit); \
2757 } while (0 /* CONSTCOND */)
2758
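/*
 * CRC-16 over an array of quadlets as used for the Config ROM header,
 * directories and leaves (IEEE 1212; polynomial x^16 + x^12 + x^5 + 1),
 * computed four bits at a time.
 */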
2759 static u_int16_t
2760 fwohci_crc16(u_int32_t *ptr, int len)
2761 {
2762 int shift;
2763 u_int32_t crc, sum, data;
2764
2765 crc = 0;
2766 while (len-- > 0) {
2767 data = *ptr++;
2768 for (shift = 28; shift >= 0; shift -= 4) {
2769 sum = ((crc >> 12) ^ (data >> shift)) & 0x000f;
2770 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
2771 }
2772 crc &= 0xffff;
2773 }
2774 return crc;
2775 }
2776
2777 static void
2778 fwohci_configrom_init(struct fwohci_softc *sc)
2779 {
2780 int i, val;
2781 struct fwohci_buf *fb;
2782 u_int32_t *hdr;
2783 struct configromctx cfr;
2784
2785 fb = &sc->sc_buf_cnfrom;
2786 memset(&cfr, 0, sizeof(cfr));
2787 cfr.ptr = hdr = (u_int32_t *)fb->fb_buf;
2788
2789 /* headers */
2790 CFR_START_UNIT(&cfr, 0);
2791 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusId));
2792 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusOptions));
2793 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDHi));
2794 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDLo));
2795 CFR_END_UNIT(&cfr);
2796 /* copy info_length from crc_length */
2797 *hdr |= (*hdr & 0x00ff0000) << 8;
2798 OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMhdr, *hdr);
2799
2800 /* root directory */
2801 CFR_START_UNIT(&cfr, 1);
2802 CFR_PUT_VALUE(&cfr, 0x03, 0x00005e); /* vendor id */
2803 CFR_PUT_REFER(&cfr, 0x81, 2); /* textual descriptor offset */
2804 CFR_PUT_VALUE(&cfr, 0x0c, 0x0083c0); /* node capability */
2805 /* spt,64,fix,lst,drq */
2806 #ifdef INET
2807 CFR_PUT_REFER(&cfr, 0xd1, 3); /* IPv4 unit directory */
2808 #endif /* INET */
2809 #ifdef INET6
2810 CFR_PUT_REFER(&cfr, 0xd1, 4); /* IPv6 unit directory */
2811 #endif /* INET6 */
2812 CFR_END_UNIT(&cfr);
2813
2814 CFR_START_UNIT(&cfr, 2);
2815 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2816 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2817 CFR_PUT_DATA4(&cfr, 'N', 'e', 't', 'B');
2818 CFR_PUT_DATA4(&cfr, 'S', 'D', 0x00, 0x00);
2819 CFR_END_UNIT(&cfr);
2820
2821 #ifdef INET
2822 /* IPv4 unit directory */
2823 CFR_START_UNIT(&cfr, 3);
2824 CFR_PUT_VALUE(&cfr, 0x12, 0x00005e); /* unit spec id */
2825 CFR_PUT_REFER(&cfr, 0x81, 6); /* textual descriptor offset */
2826 CFR_PUT_VALUE(&cfr, 0x13, 0x000001); /* unit sw version */
2827 CFR_PUT_REFER(&cfr, 0x81, 7); /* textual descriptor offset */
2828 CFR_PUT_REFER(&cfr, 0x95, 8); /* Unit location */
2829 CFR_END_UNIT(&cfr);
2830
2831 CFR_START_UNIT(&cfr, 6);
2832 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2833 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2834 CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A');
2835 CFR_END_UNIT(&cfr);
2836
2837 CFR_START_UNIT(&cfr, 7);
2838 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2839 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2840 CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '4');
2841 CFR_END_UNIT(&cfr);
2842
2843 CFR_START_UNIT(&cfr, 8); /* Spec's valid addr range. */
2844 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2845 CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1));
2846 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2847 CFR_PUT_DATA1(&cfr, FW_FIFO_LO);
2848 CFR_END_UNIT(&cfr);
2849
2850 #endif /* INET */
2851
2852 #ifdef INET6
2853 /* IPv6 unit directory */
2854 CFR_START_UNIT(&cfr, 4);
2855 CFR_PUT_VALUE(&cfr, 0x12, 0x00005e); /* unit spec id */
2856 CFR_PUT_REFER(&cfr, 0x81, 9); /* textual descriptor offset */
2857 CFR_PUT_VALUE(&cfr, 0x13, 0x000002); /* unit sw version */
2858 /* XXX: TBA by IANA */
2859 CFR_PUT_REFER(&cfr, 0x81, 10); /* textual descriptor offset */
2860 CFR_PUT_REFER(&cfr, 0x95, 11); /* Unit location */
2861 CFR_END_UNIT(&cfr);
2862
2863 CFR_START_UNIT(&cfr, 9);
2864 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2865 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2866 CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A');
2867 CFR_END_UNIT(&cfr);
2868
2869 CFR_START_UNIT(&cfr, 10);
2870 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2871 CFR_PUT_DATA1(&cfr, 0);
2872 CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '6');
2873 CFR_END_UNIT(&cfr);
2874
2875 CFR_START_UNIT(&cfr, 11); /* Spec's valid addr range. */
2876 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2877 CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1));
2878 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2879 CFR_PUT_DATA1(&cfr, FW_FIFO_LO);
2880 CFR_END_UNIT(&cfr);
2881
2882 #endif /* INET6 */
2883
2884 fb->fb_off = cfr.ptr - hdr;
2885 #ifdef FW_DEBUG
2886 DPRINTF(("%s: Config ROM:", sc->sc_sc1394.sc1394_dev.dv_xname));
2887 for (i = 0; i < fb->fb_off; i++)
2888 DPRINTF(("%s%08x", i&7?" ":"\n ", hdr[i]));
2889 DPRINTF(("\n"));
2890 #endif /* FW_DEBUG */
2891
2892 /*
2893 	 * Convert to network byte order for DMA.
2894 */
2895 for (i = 0; i < fb->fb_off; i++)
2896 HTONL(hdr[i]);
2897 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0,
2898 (caddr_t)cfr.ptr - fb->fb_buf, BUS_DMASYNC_PREWRITE);
2899
2900 OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMmap,
2901 fb->fb_dmamap->dm_segs[0].ds_addr);
2902
2903 /* This register is only valid on OHCI 1.1. */
2904 val = OHCI_CSR_READ(sc, OHCI_REG_Version);
2905 if ((OHCI_Version_GET_Version(val) == 1) &&
2906 (OHCI_Version_GET_Revision(val) == 1))
2907 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet,
2908 OHCI_HCControl_BIBImageValid);
2909
2910 /* Only allow quad reads of the rom. */
2911 for (i = 0; i < fb->fb_off; i++)
2912 fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD,
2913 CSR_BASE_HI, CSR_BASE_LO + CSR_CONFIG_ROM + (i * 4), 0,
2914 fwohci_configrom_input, NULL);
2915 }
2916
2917 static int
2918 fwohci_configrom_input(struct fwohci_softc *sc, void *arg,
2919 struct fwohci_pkt *pkt)
2920 {
2921 struct fwohci_pkt res;
2922 u_int32_t loc, *rom;
2923
2924 /* This will be used as an array index so size accordingly. */
2925 loc = pkt->fp_hdr[2] - (CSR_BASE_LO + CSR_CONFIG_ROM);
2926 if ((loc & 0x03) != 0) {
2927 /* alignment error */
2928 return IEEE1394_RCODE_ADDRESS_ERROR;
2929 	} else
2930 		loc /= 4;
2932 rom = (u_int32_t *)sc->sc_buf_cnfrom.fb_buf;
2933
2934 DPRINTFN(1, ("fwohci_configrom_input: ConfigRom[0x%04x]: 0x%08x\n", loc,
2935 ntohl(rom[loc])));
2936
2937 memset(&res, 0, sizeof(res));
2938 res.fp_hdr[3] = rom[loc];
2939 fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res);
2940 return -1;
2941 }
2942
2943 /*
2944 * SelfID buffer (no DMA context)
2945 */
2946 static void
2947 fwohci_selfid_init(struct fwohci_softc *sc)
2948 {
2949 struct fwohci_buf *fb;
2950
2951 fb = &sc->sc_buf_selfid;
2952 #ifdef DIAGNOSTIC
2953 if ((fb->fb_dmamap->dm_segs[0].ds_addr & 0x7ff) != 0)
2954 panic("fwohci_selfid_init: not aligned: %ld (%ld) %p",
2955 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_addr,
2956 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_len, fb->fb_buf);
2957 #endif
2958 memset(fb->fb_buf, 0, fb->fb_dmamap->dm_segs[0].ds_len);
2959 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0,
2960 fb->fb_dmamap->dm_segs[0].ds_len, BUS_DMASYNC_PREREAD);
2961
2962 OHCI_CSR_WRITE(sc, OHCI_REG_SelfIDBuffer,
2963 fb->fb_dmamap->dm_segs[0].ds_addr);
2964 }
2965
2966 static int
2967 fwohci_selfid_input(struct fwohci_softc *sc)
2968 {
2969 int i;
2970 u_int32_t count, val, gen;
2971 u_int32_t *buf;
2972
2973 buf = (u_int32_t *)sc->sc_buf_selfid.fb_buf;
2974 val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount);
2975 again:
2976 if (val & OHCI_SelfID_Error) {
2977 printf("%s: SelfID Error\n", sc->sc_sc1394.sc1394_dev.dv_xname);
2978 return -1;
2979 }
2980 count = OHCI_BITVAL(val, OHCI_SelfID_Size);
2981
2982 bus_dmamap_sync(sc->sc_dmat, sc->sc_buf_selfid.fb_dmamap,
2983 0, count << 2, BUS_DMASYNC_POSTREAD);
2984 gen = OHCI_BITVAL(buf[0], OHCI_SelfID_Gen);
2985
2986 #ifdef FW_DEBUG
2987 DPRINTFN(1, ("%s: SelfID: 0x%08x", sc->sc_sc1394.sc1394_dev.dv_xname,
2988 val));
2989 for (i = 0; i < count; i++)
2990 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ", buf[i]));
2991 DPRINTFN(1, ("\n"));
2992 #endif /* FW_DEBUG */
2993
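	/*
	 * Per IEEE 1394, each self-ID packet arrives as a quadlet followed
	 * by its bit-wise complement.  In packet #0 of a node, bits 29-24
	 * carry the phy ID, bit 23 is set on extended (#1-#3) packets,
	 * bit 22 is the link-active flag, bit 11 the contender bit and
	 * bit 0 means more packets follow; a node with both link-active and
	 * contender set is an isochronous resource manager candidate.
	 */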
2994 for (i = 1; i < count; i += 2) {
2995 if (buf[i] != ~buf[i + 1])
2996 break;
2997 if (buf[i] & 0x00000001)
2998 continue; /* more pkt */
2999 if (buf[i] & 0x00800000)
3000 			continue;	/* extended self-ID packet */
3001 sc->sc_rootid = (buf[i] & 0x3f000000) >> 24;
3002 if ((buf[i] & 0x00400800) == 0x00400800)
3003 sc->sc_irmid = sc->sc_rootid;
3004 }
3005
3006 val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount);
3007 if (OHCI_BITVAL(val, OHCI_SelfID_Gen) != gen) {
3008 if (OHCI_BITVAL(val, OHCI_SelfID_Gen) !=
3009 OHCI_BITVAL(buf[0], OHCI_SelfID_Gen))
3010 goto again;
3011 DPRINTF(("%s: SelfID Gen mismatch (%d, %d)\n",
3012 sc->sc_sc1394.sc1394_dev.dv_xname, gen,
3013 OHCI_BITVAL(val, OHCI_SelfID_Gen)));
3014 return -1;
3015 }
3016 if (i != count) {
3017 printf("%s: SelfID corrupted (%d, 0x%08x, 0x%08x)\n",
3018 sc->sc_sc1394.sc1394_dev.dv_xname, i, buf[i], buf[i + 1]);
3019 #if 1
3020 if (i == 1 && buf[i] == 0 && buf[i + 1] == 0) {
3021 /*
3022 * XXX: CXD3222 sometimes fails to DMA
3023 * selfid packet??
3024 */
3025 sc->sc_rootid = (count - 1) / 2 - 1;
3026 sc->sc_irmid = sc->sc_rootid;
3027 } else
3028 #endif
3029 return -1;
3030 }
3031
3032 val = OHCI_CSR_READ(sc, OHCI_REG_NodeId);
3033 if ((val & OHCI_NodeId_IDValid) == 0) {
3034 sc->sc_nodeid = 0xffff; /* invalid */
3035 printf("%s: nodeid is invalid\n",
3036 sc->sc_sc1394.sc1394_dev.dv_xname);
3037 return -1;
3038 }
3039 sc->sc_nodeid = val & 0xffff;
3040 sc->sc_sc1394.sc1394_node_id = sc->sc_nodeid & OHCI_NodeId_NodeNumber;
3041
3042 DPRINTF(("%s: nodeid=0x%04x(%d), rootid=%d, irmid=%d\n",
3043 sc->sc_sc1394.sc1394_dev.dv_xname, sc->sc_nodeid,
3044 sc->sc_nodeid & OHCI_NodeId_NodeNumber, sc->sc_rootid,
3045 sc->sc_irmid));
3046
3047 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) > sc->sc_rootid)
3048 return -1;
3049
3050 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == sc->sc_rootid)
3051 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet,
3052 OHCI_LinkControl_CycleMaster);
3053 else
3054 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear,
3055 OHCI_LinkControl_CycleMaster);
3056 return 0;
3057 }
3058
3059 /*
3060  * Some CSRs are handled by the driver.
3061 */
3062 static void
3063 fwohci_csr_init(struct fwohci_softc *sc)
3064 {
3065 int i;
3066 static u_int32_t csr[] = {
3067 CSR_STATE_CLEAR, CSR_STATE_SET, CSR_SB_CYCLE_TIME,
3068 CSR_SB_BUS_TIME, CSR_SB_BUSY_TIMEOUT, CSR_SB_BUS_MANAGER_ID,
3069 CSR_SB_CHANNEL_AVAILABLE_HI, CSR_SB_CHANNEL_AVAILABLE_LO,
3070 CSR_SB_BROADCAST_CHANNEL
3071 };
3072
3073 for (i = 0; i < sizeof(csr) / sizeof(csr[0]); i++) {
3074 fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_QUAD,
3075 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input,
3076 NULL);
3077 fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD,
3078 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input,
3079 NULL);
3080 }
3081 sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] = 31; /*XXX*/
3082 }
3083
3084 static int
3085 fwohci_csr_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3086 {
3087 struct fwohci_pkt res;
3088 u_int32_t reg;
3089
3090 /*
3091 * XXX need to do special functionality other than just r/w...
3092 */
3093 reg = pkt->fp_hdr[2] - CSR_BASE_LO;
3094
3095 if ((reg & 0x03) != 0) {
3096 /* alignment error */
3097 return IEEE1394_RCODE_ADDRESS_ERROR;
3098 }
3099 DPRINTFN(1, ("fwohci_csr_input: CSR[0x%04x]: 0x%08x", reg,
3100 *(u_int32_t *)(&sc->sc_csr[reg])));
3101 if (pkt->fp_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD) {
3102 DPRINTFN(1, (" -> 0x%08x\n",
3103 ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base)));
3104 *(u_int32_t *)&sc->sc_csr[reg] =
3105 ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base);
3106 } else {
3107 DPRINTFN(1, ("\n"));
3108 res.fp_hdr[3] = htonl(*(u_int32_t *)&sc->sc_csr[reg]);
3109 res.fp_iov[0].iov_base = &res.fp_hdr[3];
3110 res.fp_iov[0].iov_len = 4;
3111 res.fp_uio.uio_resid = 4;
3112 res.fp_uio.uio_iovcnt = 1;
3113 fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res);
3114 return -1;
3115 }
3116 return IEEE1394_RCODE_COMPLETE;
3117 }
3118
3119 /*
3120 * Mapping between nodeid and unique ID (EUI-64).
3121 *
3122  * Track old mappings and simply update their devices with the new IDs when
3123  * they match an existing EUI.  This allows proper re-enumeration of the bus.
3124 */
3125 static void
3126 fwohci_uid_collect(struct fwohci_softc *sc)
3127 {
3128 int i;
3129 struct fwohci_uidtbl *fu;
3130 struct ieee1394_softc *iea;
3131
3132 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node)
3133 iea->sc1394_node_id = 0xffff;
3134
3135 if (sc->sc_uidtbl != NULL)
3136 free(sc->sc_uidtbl, M_DEVBUF);
3137 sc->sc_uidtbl = malloc(sizeof(*fu) * (sc->sc_rootid + 1), M_DEVBUF,
3138 M_NOWAIT|M_ZERO); /* XXX M_WAITOK requires locks */
3139 if (sc->sc_uidtbl == NULL)
3140 return;
3141
3142 for (i = 0, fu = sc->sc_uidtbl; i <= sc->sc_rootid; i++, fu++) {
3143 if (i == (sc->sc_nodeid & OHCI_NodeId_NodeNumber)) {
3144 memcpy(fu->fu_uid, sc->sc_sc1394.sc1394_guid, 8);
3145 fu->fu_valid = 3;
3146
3147 iea = (struct ieee1394_softc *)sc->sc_sc1394.sc1394_if;
3148 if (iea) {
3149 iea->sc1394_node_id = i;
3150 DPRINTF(("%s: Updating nodeid to %d\n",
3151 iea->sc1394_dev.dv_xname,
3152 iea->sc1394_node_id));
3153 }
3154 } else {
3155 fu->fu_valid = 0;
3156 fwohci_uid_req(sc, i);
3157 }
3158 }
3159 if (sc->sc_rootid == 0)
3160 fwohci_check_nodes(sc);
3161 }
3162
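/*
 * Ask node "phyid" for its EUI-64 by issuing two quadlet reads of its
 * Config ROM bus info block (offsets 12 and 16 hold the GUID high and low
 * words).  The responses are matched by the (phy id, tlabel) handlers
 * registered here and collected in fwohci_uid_input().
 */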
3163 static void
3164 fwohci_uid_req(struct fwohci_softc *sc, int phyid)
3165 {
3166 struct fwohci_pkt pkt;
3167
3168 memset(&pkt, 0, sizeof(pkt));
3169 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3170 pkt.fp_hlen = 12;
3171 pkt.fp_dlen = 0;
3172 pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
3173 (pkt.fp_tcode << 4);
3174 pkt.fp_hdr[1] = ((0xffc0 | phyid) << 16) | CSR_BASE_HI;
3175 pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 12;
3176 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid,
3177 sc->sc_tlabel, 0, fwohci_uid_input, (void *)0);
3178 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3179 fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
3180
3181 pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
3182 (pkt.fp_tcode << 4);
3183 pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 16;
3184 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid,
3185 sc->sc_tlabel, 0, fwohci_uid_input, (void *)1);
3186 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3187 fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
3188 }
3189
3190 static int
3191 fwohci_uid_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *res)
3192 {
3193 struct fwohci_uidtbl *fu;
3194 struct ieee1394_softc *iea;
3195 struct ieee1394_attach_args fwa;
3196 int i, n, done, rcode, found;
3197
3198 found = 0;
3199
3200 n = (res->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
3201 rcode = (res->fp_hdr[1] & 0x0000f000) >> 12;
3202 if (rcode != IEEE1394_RCODE_COMPLETE ||
3203 sc->sc_uidtbl == NULL ||
3204 n > sc->sc_rootid)
3205 return 0;
3206 fu = &sc->sc_uidtbl[n];
3207 if (arg == 0) {
3208 memcpy(fu->fu_uid, res->fp_iov[0].iov_base, 4);
3209 fu->fu_valid |= 0x1;
3210 } else {
3211 memcpy(fu->fu_uid + 4, res->fp_iov[0].iov_base, 4);
3212 fu->fu_valid |= 0x2;
3213 }
3214 #ifdef FW_DEBUG
3215 if (fu->fu_valid == 0x3)
3216 DPRINTFN(1, ("fwohci_uid_input: "
3217 "Node %d, UID %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", n,
3218 fu->fu_uid[0], fu->fu_uid[1], fu->fu_uid[2], fu->fu_uid[3],
3219 fu->fu_uid[4], fu->fu_uid[5], fu->fu_uid[6], fu->fu_uid[7]));
3220 #endif
3221 if (fu->fu_valid == 0x3) {
3222 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node)
3223 if (memcmp(iea->sc1394_guid, fu->fu_uid, 8) == 0) {
3224 found = 1;
3225 iea->sc1394_node_id = n;
3226 DPRINTF(("%s: Updating nodeid to %d\n",
3227 iea->sc1394_dev.dv_xname,
3228 iea->sc1394_node_id));
3229 if (iea->sc1394_callback.sc1394_reset)
3230 iea->sc1394_callback.sc1394_reset(iea,
3231 iea->sc1394_callback.sc1394_resetarg);
3232 break;
3233 }
3234 if (!found) {
3235 strcpy(fwa.name, "fwnode");
3236 memcpy(fwa.uid, fu->fu_uid, 8);
3237 fwa.nodeid = n;
3238 iea = (struct ieee1394_softc *)
3239 config_found_sm(&sc->sc_sc1394.sc1394_dev, &fwa,
3240 fwohci_print, fwohci_submatch);
3241 if (iea != NULL)
3242 LIST_INSERT_HEAD(&sc->sc_nodelist, iea,
3243 sc1394_node);
3244 }
3245 }
3246 done = 1;
3247
3248 for (i = 0; i < sc->sc_rootid + 1; i++) {
3249 fu = &sc->sc_uidtbl[i];
3250 if (fu->fu_valid != 0x3) {
3251 done = 0;
3252 break;
3253 }
3254 }
3255 if (done)
3256 fwohci_check_nodes(sc);
3257
3258 return 0;
3259 }
3260
3261 static void
3262 fwohci_check_nodes(struct fwohci_softc *sc)
3263 {
3264 struct device *detach = NULL;
3265 struct ieee1394_softc *iea;
3266
3267 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node) {
3268
3269 /*
3270 * Have to defer detachment until the next
3271 * loop iteration since config_detach
3272 		 * frees the softc and the loop iterator
3273 * needs data from the softc to move
3274 * forward.
3275 */
3276
3277 if (detach) {
3278 config_detach(detach, 0);
3279 detach = NULL;
3280 }
3281 if (iea->sc1394_node_id == 0xffff) {
3282 detach = (struct device *)iea;
3283 LIST_REMOVE(iea, sc1394_node);
3284 }
3285 }
3286 if (detach)
3287 config_detach(detach, 0);
3288 }
3289
3290 static int
3291 fwohci_uid_lookup(struct fwohci_softc *sc, const u_int8_t *uid)
3292 {
3293 struct fwohci_uidtbl *fu;
3294 int n;
3295 static const u_int8_t bcast[] =
3296 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3297
3298 fu = sc->sc_uidtbl;
3299 if (fu == NULL) {
3300 if (memcmp(uid, bcast, sizeof(bcast)) == 0)
3301 return IEEE1394_BCAST_PHY_ID;
3302 fwohci_uid_collect(sc); /* try to get */
3303 return -1;
3304 }
3305 for (n = 0; n <= sc->sc_rootid; n++, fu++) {
3306 if (fu->fu_valid == 0x3 && memcmp(fu->fu_uid, uid, 8) == 0)
3307 return n;
3308 }
3309 if (memcmp(uid, bcast, sizeof(bcast)) == 0)
3310 return IEEE1394_BCAST_PHY_ID;
3311 for (n = 0, fu = sc->sc_uidtbl; n <= sc->sc_rootid; n++, fu++) {
3312 if (fu->fu_valid != 0x3) {
3313 /*
3314 * XXX: need timer before retransmission
3315 */
3316 fwohci_uid_req(sc, n);
3317 }
3318 }
3319 return -1;
3320 }
3321
3322 /*
3323 * functions to support network interface
3324 */
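/*
 * Register receive handlers for the network interface: unicast packets
 * arrive as block write requests to the FIFO address (offhi, offlo), while
 * broadcast/multicast packets arrive as GASP-tagged asynchronous stream
 * packets on the broadcast channel; both are funneled into
 * fwohci_if_input().
 */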
3325 static int
3326 fwohci_if_inreg(struct device *self, u_int32_t offhi, u_int32_t offlo,
3327 void (*handler)(struct device *, struct mbuf *))
3328 {
3329 struct fwohci_softc *sc = (struct fwohci_softc *)self;
3330
3331 fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_BLOCK, offhi, offlo, 0,
3332 handler ? fwohci_if_input : NULL, handler);
3333 fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA,
3334 (sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] & IEEE1394_ISOCH_MASK) |
3335 OHCI_ASYNC_STREAM,
3336 1 << IEEE1394_TAG_GASP, 0,
3337 handler ? fwohci_if_input : NULL, handler);
3338 return 0;
3339 }
3340
3341 static int
3342 fwohci_if_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3343 {
3344 int n, len;
3345 struct mbuf *m;
3346 struct iovec *iov;
3347 void (*handler)(struct device *, struct mbuf *) = arg;
3348
3349 #ifdef FW_DEBUG
3350 int i;
3351 DPRINTFN(1, ("fwohci_if_input: tcode=0x%x, dlen=%d", pkt->fp_tcode,
3352 pkt->fp_dlen));
3353 for (i = 0; i < pkt->fp_hlen/4; i++)
3354 DPRINTFN(2, ("%s%08x", i?" ":"\n ", pkt->fp_hdr[i]));
3355 DPRINTFN(2, ("$"));
3356 for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){
3357 iov = &pkt->fp_iov[n];
3358 for (i = 0; i < iov->iov_len; i++)
3359 DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ",
3360 ((u_int8_t *)iov->iov_base)[i]));
3361 DPRINTFN(2, ("$"));
3362 }
3363 DPRINTFN(1, ("\n"));
3364 #endif /* FW_DEBUG */
3365 len = pkt->fp_dlen;
3366 MGETHDR(m, M_DONTWAIT, MT_DATA);
3367 if (m == NULL)
3368 return IEEE1394_RCODE_COMPLETE;
3369 m->m_len = 16;
3370 if (len + m->m_len > MHLEN) {
3371 MCLGET(m, M_DONTWAIT);
3372 if ((m->m_flags & M_EXT) == 0) {
3373 m_freem(m);
3374 return IEEE1394_RCODE_COMPLETE;
3375 }
3376 }
3377 n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
3378 if (sc->sc_uidtbl == NULL || n > sc->sc_rootid ||
3379 sc->sc_uidtbl[n].fu_valid != 0x3) {
3380 printf("%s: packet from unknown node: phy id %d\n",
3381 sc->sc_sc1394.sc1394_dev.dv_xname, n);
3382 m_freem(m);
3383 fwohci_uid_req(sc, n);
3384 return IEEE1394_RCODE_COMPLETE;
3385 }
3386 memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8);
3387 if (pkt->fp_tcode == IEEE1394_TCODE_STREAM_DATA) {
3388 m->m_flags |= M_BCAST;
3389 mtod(m, u_int32_t *)[2] = mtod(m, u_int32_t *)[3] = 0;
3390 } else {
3391 mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]);
3392 mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]);
3393 }
3394 mtod(m, u_int8_t *)[8] = n; /*XXX: node id for debug */
3395 mtod(m, u_int8_t *)[9] =
3396 (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) &
3397 ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1);
3398
3399 m->m_pkthdr.rcvif = NULL; /* set in child */
3400 m->m_pkthdr.len = len + m->m_len;
3401 /*
3402 	 * We could hand the receive buffer up as an external mbuf instead
3403 	 * of copying it here.  But the asynchronous receive buffer must be
3404 	 * operated in buffer-fill mode, so each receive buffer would be
3405 	 * shared by multiple mbufs; if the upper layer does not free an
3406 	 * mbuf promptly (e.g. the application is suspended), the buffer
3407 	 * would have to be reallocated.
3408 	 * The isochronous buffer is operated in packet-buffer mode, so it
3409 	 * would be easy to map to an external mbuf, but it is used for
3410 	 * broadcast/multicast only and is not expected to be performance
3411 	 * sensitive for now.
3412 	 * XXX: Performance may matter for the multicast case; revisit later.
3413 * -- onoe
3414 */
3415 n = 0;
3416 iov = pkt->fp_uio.uio_iov;
3417 while (len > 0) {
3418 memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base,
3419 iov->iov_len);
3420 m->m_len += iov->iov_len;
3421 len -= iov->iov_len;
3422 iov++;
3423 }
3424 (*handler)(sc->sc_sc1394.sc1394_if, m);
3425 return IEEE1394_RCODE_COMPLETE;
3426 }
3427
3428 static int
3429 fwohci_if_input_iso(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3430 {
3431 int n, len;
3432 int chan, tag;
3433 struct mbuf *m;
3434 struct iovec *iov;
3435 void (*handler)(struct device *, struct mbuf *) = arg;
3436 #ifdef FW_DEBUG
3437 int i;
3438 #endif
3439
3440 chan = (pkt->fp_hdr[0] & 0x00003f00) >> 8;
3441 tag = (pkt->fp_hdr[0] & 0x0000c000) >> 14;
3442 #ifdef FW_DEBUG
3443 DPRINTFN(1, ("fwohci_if_input_iso: "
3444 "tcode=0x%x, chan=%d, tag=%x, dlen=%d",
3445 pkt->fp_tcode, chan, tag, pkt->fp_dlen));
3446 for (i = 0; i < pkt->fp_hlen/4; i++)
3447 DPRINTFN(2, ("%s%08x", i?" ":"\n\t", pkt->fp_hdr[i]));
3448 DPRINTFN(2, ("$"));
3449 for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){
3450 iov = &pkt->fp_iov[n];
3451 for (i = 0; i < iov->iov_len; i++)
3452 DPRINTFN(2, ("%s%02x",
3453 (i%32)?((i%4)?"":" "):"\n\t",
3454 ((u_int8_t *)iov->iov_base)[i]));
3455 DPRINTFN(2, ("$"));
3456 }
3457 DPRINTFN(2, ("\n"));
3458 #endif /* FW_DEBUG */
3459 len = pkt->fp_dlen;
3460 MGETHDR(m, M_DONTWAIT, MT_DATA);
3461 if (m == NULL)
3462 return IEEE1394_RCODE_COMPLETE;
3463 m->m_len = 16;
3464 if (m->m_len + len > MHLEN) {
3465 MCLGET(m, M_DONTWAIT);
3466 if ((m->m_flags & M_EXT) == 0) {
3467 m_freem(m);
3468 return IEEE1394_RCODE_COMPLETE;
3469 }
3470 }
3471
3472 m->m_flags |= M_BCAST;
3473
3474 if (tag == IEEE1394_TAG_GASP) {
3475 n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
3476 if (sc->sc_uidtbl == NULL || n > sc->sc_rootid ||
3477 sc->sc_uidtbl[n].fu_valid != 0x3) {
3478 printf("%s: packet from unknown node: phy id %d\n",
3479 sc->sc_sc1394.sc1394_dev.dv_xname, n);
3480 m_freem(m);
3481 return IEEE1394_RCODE_COMPLETE;
3482 }
3483 memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8);
3484 mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]);
3485 mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]);
3486 mtod(m, u_int8_t *)[8] = n; /*XXX: node id for debug */
3487 mtod(m, u_int8_t *)[9] =
3488 (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) &
3489 ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1);
3490 }
3491 mtod(m, u_int8_t *)[14] = chan;
3492 mtod(m, u_int8_t *)[15] = tag;
3493
3494
3495 m->m_pkthdr.rcvif = NULL; /* set in child */
3496 m->m_pkthdr.len = len + m->m_len;
3497 /*
3498 	 * We could hand the receive buffer up as an external mbuf instead
3499 	 * of copying it here.  But the asynchronous receive buffer must be
3500 	 * operated in buffer-fill mode, so each receive buffer would be
3501 	 * shared by multiple mbufs; if the upper layer does not free an
3502 	 * mbuf promptly (e.g. the application is suspended), the buffer
3503 	 * would have to be reallocated.
3504 	 * The isochronous buffer is operated in packet-buffer mode, so it
3505 	 * would be easy to map to an external mbuf, but it is used for
3506 	 * broadcast/multicast only and is not expected to be performance
3507 	 * sensitive for now.
3508 	 * XXX: Performance may matter for the multicast case; revisit later.
3509 * -- onoe
3510 */
3511 n = 0;
3512 iov = pkt->fp_uio.uio_iov;
3513 while (len > 0) {
3514 memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base,
3515 iov->iov_len);
3516 m->m_len += iov->iov_len;
3517 len -= iov->iov_len;
3518 iov++;
3519 }
3520 (*handler)(sc->sc_sc1394.sc1394_if, m);
3521 return IEEE1394_RCODE_COMPLETE;
3522 }
3523
3524
3525
3526 static int
3527 fwohci_if_output(struct device *self, struct mbuf *m0,
3528 void (*callback)(struct device *, struct mbuf *))
3529 {
3530 struct fwohci_softc *sc = (struct fwohci_softc *)self;
3531 struct fwohci_pkt pkt;
3532 u_int8_t *p;
3533 int n, error, spd, hdrlen, maxrec;
3534 #ifdef FW_DEBUG
3535 struct mbuf *m;
3536 #endif
3537
3538 p = mtod(m0, u_int8_t *);
3539 if (m0->m_flags & (M_BCAST | M_MCAST)) {
3540 spd = IEEE1394_SPD_S100; /*XXX*/
3541 maxrec = 512; /*XXX*/
3542 hdrlen = 8;
3543 } else {
3544 n = fwohci_uid_lookup(sc, p);
3545 if (n < 0) {
3546 printf("%s: nodeid unknown:"
3547 " %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
3548 sc->sc_sc1394.sc1394_dev.dv_xname,
3549 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
3550 error = EHOSTUNREACH;
3551 goto end;
3552 }
3553 if (n == IEEE1394_BCAST_PHY_ID) {
3554 printf("%s: broadcast with !M_MCAST\n",
3555 sc->sc_sc1394.sc1394_dev.dv_xname);
3556 #ifdef FW_DEBUG
3557 DPRINTFN(2, ("packet:"));
3558 for (m = m0; m != NULL; m = m->m_next) {
3559 for (n = 0; n < m->m_len; n++)
3560 DPRINTFN(2, ("%s%02x", (n%32)?
3561 ((n%4)?"":" "):"\n ",
3562 mtod(m, u_int8_t *)[n]));
3563 DPRINTFN(2, ("$"));
3564 }
3565 DPRINTFN(2, ("\n"));
3566 #endif
3567 error = EHOSTUNREACH;
3568 goto end;
3569 }
3570 maxrec = 2 << p[8];
3571 spd = p[9];
3572 hdrlen = 0;
3573 }
3574 if (spd > sc->sc_sc1394.sc1394_link_speed) {
3575 DPRINTF(("fwohci_if_output: spd (%d) is faster than %d\n",
3576 spd, sc->sc_sc1394.sc1394_link_speed));
3577 spd = sc->sc_sc1394.sc1394_link_speed;
3578 }
3579 if (maxrec > (512 << spd)) {
3580 		DPRINTF(("fwohci_if_output: maxrec (%d) is larger than allowed for spd (%d)"
3581 "\n", maxrec, spd));
3582 maxrec = 512 << spd;
3583 }
3584 while (maxrec > sc->sc_sc1394.sc1394_max_receive) {
3585 DPRINTF(("fwohci_if_output: maxrec (%d) is larger than"
3586 " %d\n", maxrec, sc->sc_sc1394.sc1394_max_receive));
3587 maxrec >>= 1;
3588 }
3589 if (maxrec < 512) {
3590 DPRINTF(("fwohci_if_output: maxrec (%d) is smaller than "
3591 "minimum\n", maxrec));
3592 maxrec = 512;
3593 }
3594
3595 m_adj(m0, 16 - hdrlen);
3596 if (m0->m_pkthdr.len > maxrec) {
3597 DPRINTF(("fwohci_if_output: packet too big: hdr %d, pktlen "
3598 "%d, maxrec %d\n", hdrlen, m0->m_pkthdr.len, maxrec));
3599 error = E2BIG; /*XXX*/
3600 goto end;
3601 }
3602
3603 memset(&pkt, 0, sizeof(pkt));
3604 pkt.fp_uio.uio_iov = pkt.fp_iov;
3605 pkt.fp_uio.uio_segflg = UIO_SYSSPACE;
3606 pkt.fp_uio.uio_rw = UIO_WRITE;
3607 if (m0->m_flags & (M_BCAST | M_MCAST)) {
3608 /* construct GASP header */
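		/*
		 * GASP header layout: bytes 0-1 carry our source node ID,
		 * bytes 2-4 the IANA specifier_ID 0x00005E and bytes 5-7 the
		 * version 0x000001, as filled in below.
		 */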
3609 p = mtod(m0, u_int8_t *);
3610 p[0] = sc->sc_nodeid >> 8;
3611 p[1] = sc->sc_nodeid & 0xff;
3612 p[2] = 0x00; p[3] = 0x00; p[4] = 0x5e;
3613 p[5] = 0x00; p[6] = 0x00; p[7] = 0x01;
3614 pkt.fp_tcode = IEEE1394_TCODE_STREAM_DATA;
3615 pkt.fp_hlen = 8;
3616 pkt.fp_hdr[0] = (spd << 16) | (IEEE1394_TAG_GASP << 14) |
3617 ((sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] &
3618 OHCI_NodeId_NodeNumber) << 8);
3619 pkt.fp_hdr[1] = m0->m_pkthdr.len << 16;
3620 } else {
3621 pkt.fp_tcode = IEEE1394_TCODE_WRITE_REQ_BLOCK;
3622 pkt.fp_hlen = 16;
3623 pkt.fp_hdr[0] = 0x00800100 | (sc->sc_tlabel << 10) |
3624 (spd << 16);
3625 pkt.fp_hdr[1] =
3626 (((sc->sc_nodeid & OHCI_NodeId_BusNumber) | n) << 16) |
3627 (p[10] << 8) | p[11];
3628 pkt.fp_hdr[2] = (p[12]<<24) | (p[13]<<16) | (p[14]<<8) | p[15];
3629 pkt.fp_hdr[3] = m0->m_pkthdr.len << 16;
3630 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3631 }
3632 pkt.fp_hdr[0] |= (pkt.fp_tcode << 4);
3633 pkt.fp_dlen = m0->m_pkthdr.len;
3634 pkt.fp_m = m0;
3635 pkt.fp_callback = callback;
3636 error = fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
3637 m0 = pkt.fp_m;
3638 end:
3639 if (m0 != NULL) {
3640 if (callback)
3641 (*callback)(sc->sc_sc1394.sc1394_if, m0);
3642 else
3643 m_freem(m0);
3644 }
3645 return error;
3646 }
3647
3648 /*
3649 * High level routines to provide abstraction to attaching layers to
3650 * send/receive data.
3651 */
3652
3653 /*
3654 * These break down into 4 routines as follows:
3655 *
3656 * int fwohci_read(struct ieee1394_abuf *)
3657 *
3658 * This routine will attempt to read a region from the requested node.
3659 * A callback must be provided which will be called when either the completed
3660 * read is done or an unrecoverable error occurs. This is mainly a convenience
3661 * routine since it will encapsulate retrying a region as quadlet vs. block
3662 * reads and recombining all the returned data. This could also be done with a
3663 * series of write/inreg's for each packet sent.
3664 *
3665 * int fwohci_write(struct ieee1394_abuf *)
3666 *
3667  * The workhorse main entry point for putting packets on the bus. This is the
3668 * generalized interface for fwnode/etc code to put packets out onto the bus.
3669 * It accepts all standard ieee1394 tcodes (XXX: only a few today) and
3670 * optionally will callback via a func pointer to the calling code with the
3671 * resulting ACK code from the packet. If the ACK code is to be ignored (i.e.
3672  * no cb) then the write routine will take care of freeing the abuf since the
3673 * fwnode/etc code won't have any knowledge of when to do this. This allows for
3674 * simple one-off packets to be sent from the upper-level code without worrying
3675 * about a callback for cleanup.
3676 *
3677 * int fwohci_inreg(struct ieee1394_abuf *, int)
3678 *
 * This is very simple. It evaluates the abuf passed in and registers an
 * internal handler as the callback for packets received for that operation.
 * The integer argument specifies whether, for a block read/write operation,
 * sub-regions may be read/written (in block form) as well.
3683 *
3684 * XXX: This whole structure needs to be redone as a list of regions and
3685 * operations allowed on those regions.
3686 *
3687 * int fwohci_unreg(struct ieee1394_abuf *, int)
3688 *
 * This simply unregisters the callback registered via inreg for items
3690 * which only need to register an area for a one-time operation (like a status
3691 * buffer a remote node will write to when the current operation is done). The
3692 * int argument specifies the same behavior as inreg, except in reverse (i.e.
3693 * it unregisters).
3694 */
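/*
 * A minimal usage sketch for the "one-off packet" case of fwohci_write()
 * described above: no callback is given, so the write path frees the abuf
 * (and its data) itself once the ACK arrives.  This block is illustrative
 * only and not compiled; "example_oneoff_write", the destination node
 * softc argument and the address/payload values are assumptions of the
 * sketch, not part of the driver.
 */
#if 0
static void
example_oneoff_write(struct ieee1394_softc *node_sc)
{
	struct ieee1394_abuf *ab;

	ab = malloc(sizeof(*ab), M_1394DATA, M_WAITOK|M_ZERO);
	ab->ab_data = malloc(4, M_1394DATA, M_WAITOK|M_ZERO);

	ab->ab_req = node_sc;			/* destination node */
	ab->ab_tcode = IEEE1394_TCODE_WRITE_REQ_QUAD;
	ab->ab_addr = 0x0000123400000000ULL;	/* example 48-bit offset */
	ab->ab_length = 4;
	ab->ab_data[0] = 0x12345678;		/* example payload quadlet */
	ab->ab_cb = NULL;	/* no callback: fwohci_write_ack frees ab */

	(void)fwohci_write(ab);
}
#endif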
3695
3696 static int
3697 fwohci_read(struct ieee1394_abuf *ab)
3698 {
3699 struct fwohci_pkt pkt;
3700 struct ieee1394_softc *sc = ab->ab_req;
3701 struct fwohci_softc *psc =
3702 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
3703 struct fwohci_cb *fcb;
3704 u_int32_t high, lo;
3705 int rv, tcode;
3706
3707 /* Have to have a callback when reading. */
3708 if (ab->ab_cb == NULL)
3709 return -1;
3710
3711 fcb = malloc(sizeof(struct fwohci_cb), M_DEVBUF, M_WAITOK);
3712 fcb->ab = ab;
3713 fcb->count = 0;
3714 fcb->abuf_valid = 1;
3715
3716 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3717 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3718
3719 memset(&pkt, 0, sizeof(pkt));
3720 pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3721 pkt.fp_hdr[2] = lo;
3722 pkt.fp_dlen = 0;
3723
3724 if (ab->ab_length == 4) {
3725 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3726 tcode = IEEE1394_TCODE_READ_RESP_QUAD;
3727 pkt.fp_hlen = 12;
3728 } else {
3729 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_BLOCK;
3730 pkt.fp_hlen = 16;
3731 tcode = IEEE1394_TCODE_READ_RESP_BLOCK;
3732 pkt.fp_hdr[3] = (ab->ab_length << 16);
3733 }
3734 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3735 (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4);
3736
3737 pkt.fp_statusarg = fcb;
3738 pkt.fp_statuscb = fwohci_read_resp;
3739
3740 rv = fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id,
3741 psc->sc_tlabel, 0, fwohci_read_resp, fcb);
3742 if (rv)
3743 return rv;
3744 rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt);
3745 if (rv)
3746 fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id,
3747 psc->sc_tlabel, 0, NULL, NULL);
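	/* IEEE 1394 transaction labels are 6 bits wide, hence the & 0x3f wrap. */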
3748 psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f;
3749 fcb->count = 1;
3750 return rv;
3751 }
3752
3753 static int
3754 fwohci_write(struct ieee1394_abuf *ab)
3755 {
3756 struct fwohci_pkt pkt;
3757 struct ieee1394_softc *sc = ab->ab_req;
3758 struct fwohci_softc *psc =
3759 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
3760 u_int32_t high, lo;
3761 int rv;
3762
3763 if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) {
3764 if (ab->ab_length > IEEE1394_MAX_REC(sc->sc1394_max_receive)) {
3765 DPRINTF(("Packet too large: %d\n", ab->ab_length));
3766 return E2BIG;
3767 }
3768 }
3769
3770 if (ab->ab_length >
3771 IEEE1394_MAX_ASYNCH_FOR_SPEED(sc->sc1394_link_speed)) {
3772 DPRINTF(("Packet too large: %d\n", ab->ab_length));
3773 return E2BIG;
3774 }
3775
3776 if (ab->ab_data && ab->ab_uio)
3777 panic("Can't call with uio and data set");
3778 if ((ab->ab_data == NULL) && (ab->ab_uio == NULL))
3779 panic("One of either ab_data or ab_uio must be set");
3780
3781 memset(&pkt, 0, sizeof(pkt));
3782
3783 pkt.fp_tcode = ab->ab_tcode;
3784 if (ab->ab_data) {
3785 pkt.fp_uio.uio_iov = pkt.fp_iov;
3786 pkt.fp_uio.uio_segflg = UIO_SYSSPACE;
3787 pkt.fp_uio.uio_rw = UIO_WRITE;
3788 } else
3789 memcpy(&pkt.fp_uio, ab->ab_uio, sizeof(struct uio));
3790
3791 pkt.fp_statusarg = ab;
3792 pkt.fp_statuscb = fwohci_write_ack;
3793
3794 switch (ab->ab_tcode) {
3795 case IEEE1394_TCODE_WRITE_RESP:
3796 pkt.fp_hlen = 12;
3797 case IEEE1394_TCODE_READ_RESP_QUAD:
3798 case IEEE1394_TCODE_READ_RESP_BLOCK:
3799 if (!pkt.fp_hlen)
3800 pkt.fp_hlen = 16;
3801 high = ab->ab_retlen;
3802 ab->ab_retlen = 0;
3803 lo = 0;
3804 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3805 (ab->ab_tlabel << 10) | (pkt.fp_tcode << 4);
3806 break;
3807 default:
3808 pkt.fp_hlen = 16;
3809 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3810 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3811 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3812 (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4);
3813 psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f;
3814 break;
3815 }
3816
3817 pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3818 pkt.fp_hdr[2] = lo;
3819 if (pkt.fp_hlen == 16) {
3820 if (ab->ab_length == 4) {
3821 pkt.fp_hdr[3] = ab->ab_data[0];
3822 pkt.fp_dlen = 0;
3823 } else {
3824 pkt.fp_hdr[3] = (ab->ab_length << 16);
3825 pkt.fp_dlen = ab->ab_length;
3826 if (ab->ab_data) {
3827 pkt.fp_uio.uio_iovcnt = 1;
3828 pkt.fp_uio.uio_resid = ab->ab_length;
3829 pkt.fp_iov[0].iov_base = ab->ab_data;
3830 pkt.fp_iov[0].iov_len = ab->ab_length;
3831 }
3832 }
3833 }
3834 switch (ab->ab_tcode) {
3835 case IEEE1394_TCODE_WRITE_RESP:
3836 case IEEE1394_TCODE_READ_RESP_QUAD:
3837 case IEEE1394_TCODE_READ_RESP_BLOCK:
3838 rv = fwohci_at_output(psc, psc->sc_ctx_atrs, &pkt);
3839 break;
3840 default:
3841 rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt);
3842 break;
3843 }
3844 return rv;
3845 }
3846
3847 static int
3848 fwohci_read_resp(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3849 {
3850 struct fwohci_cb *fcb = arg;
3851 struct ieee1394_abuf *ab = fcb->ab;
3852 struct fwohci_pkt newpkt;
3853 u_int32_t *cur, high, lo;
3854 int i, tcode, rcode, status, rv;
3855
3856 /*
	 * Both the ACK handling and the normal response callbacks are handled
	 * here.  The main reason is the variety of error conditions that can
	 * occur when block reading some areas and the different ways those are
	 * reported back to the requesting station (ACK codes, response codes,
	 * etc.), which would be much harder to process if the two were not
	 * handled in one place.
3863 */
3864
3865 /* Check for status packet. */
3866
3867 if (pkt->fp_tcode == -1) {
3868 status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK;
3869 rcode = -1;
3870 tcode = (pkt->fp_hdr[0] >> 4) & 0xf;
3871 if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
3872 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))
3873 DPRINTFN(2, ("Got status packet: 0x%02x\n",
3874 (unsigned int)status));
3875 fcb->count--;
3876
3877 /*
	 * All the ACKs are back and the buffer is invalid (i.e. the
	 * callback has already been called).  Clean up.
3880 */
3881
3882 if (fcb->abuf_valid == 0) {
3883 if (fcb->count == 0)
3884 free(fcb, M_DEVBUF);
3885 return IEEE1394_RCODE_COMPLETE;
3886 }
3887 } else {
3888 status = -1;
3889 tcode = pkt->fp_tcode;
3890 rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12;
3891 }
3892
3893 /*
	 * Some areas (like the config ROM) want to be read as quadlets only.
	 *
	 * The cases handled here are:
	 *
	 * An ACK_TYPE_ERROR on a block read.
	 *
	 * Either an RCODE_TYPE or an RCODE_ADDRESS error in a block read
	 * response.
	 *
	 * In all cases construct a new packet for a quadlet read and let
	 * multi_resp handle the iteration over the space.
3905 */
3906
3907 if (((status == OHCI_CTXCTL_EVENT_ACK_TYPE_ERROR) &&
3908 (tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) ||
3909 (((rcode == IEEE1394_RCODE_TYPE_ERROR) ||
3910 (rcode == IEEE1394_RCODE_ADDRESS_ERROR)) &&
3911 (tcode == IEEE1394_TCODE_READ_RESP_BLOCK))) {
3912
3913 /* Read the area in quadlet chunks (internally track this). */
3914
3915 memset(&newpkt, 0, sizeof(newpkt));
3916
3917 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3918 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3919
3920 newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3921 newpkt.fp_hlen = 12;
3922 newpkt.fp_dlen = 0;
3923 newpkt.fp_hdr[1] =
3924 ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3925 newpkt.fp_hdr[2] = lo;
3926 newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
3927 (newpkt.fp_tcode << 4);
3928
3929 rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
3930 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0,
3931 fwohci_read_multi_resp, fcb);
3932 if (rv) {
3933 (*ab->ab_cb)(ab, -1);
3934 goto cleanup;
3935 }
3936 newpkt.fp_statusarg = fcb;
3937 newpkt.fp_statuscb = fwohci_read_resp;
3938 rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt);
3939 if (rv) {
3940 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
3941 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0, NULL,
3942 NULL);
3943 (*ab->ab_cb)(ab, -1);
3944 goto cleanup;
3945 }
3946 fcb->count++;
3947 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3948 return IEEE1394_RCODE_COMPLETE;
3949 } else if ((rcode != -1) || ((status != -1) &&
3950 (status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
3951 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))) {
3952
3953 /*
3954 * Recombine all the iov data into 1 chunk for higher
3955 * level code.
3956 */
3957
3958 if (rcode != -1) {
3959 cur = ab->ab_data;
3960 for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) {
3961 /*
3962 * Make sure and don't exceed the buffer
3963 * allocated for return.
3964 */
3965 if ((ab->ab_retlen + pkt->fp_iov[i].iov_len) >
3966 ab->ab_length) {
3967 memcpy(cur, pkt->fp_iov[i].iov_base,
3968 (ab->ab_length - ab->ab_retlen));
3969 ab->ab_retlen = ab->ab_length;
3970 break;
3971 }
3972 memcpy(cur, pkt->fp_iov[i].iov_base,
3973 pkt->fp_iov[i].iov_len);
3974 cur += pkt->fp_iov[i].iov_len;
3975 ab->ab_retlen += pkt->fp_iov[i].iov_len;
3976 }
3977 }
3978 if (status != -1)
3979 /* XXX: Need a complete tlabel interface. */
3980 for (i = 0; i < 64; i++)
3981 fwohci_handler_set(sc,
3982 IEEE1394_TCODE_READ_RESP_QUAD,
3983 ab->ab_req->sc1394_node_id, i, 0, NULL,
3984 NULL);
3985 (*ab->ab_cb)(ab, rcode);
3986 goto cleanup;
3987 } else
3988 /* Good ack packet. */
3989 return IEEE1394_RCODE_COMPLETE;
3990
3991 /* Can't get here unless ab->ab_cb has been called. */
3992
3993 cleanup:
3994 fcb->abuf_valid = 0;
3995 if (fcb->count == 0)
3996 free(fcb, M_DEVBUF);
3997 return IEEE1394_RCODE_COMPLETE;
3998 }
3999
4000 static int
4001 fwohci_read_multi_resp(struct fwohci_softc *sc, void *arg,
4002 struct fwohci_pkt *pkt)
4003 {
4004 struct fwohci_cb *fcb = arg;
4005 struct ieee1394_abuf *ab = fcb->ab;
4006 struct fwohci_pkt newpkt;
4007 u_int32_t high, lo;
4008 int rcode, rv;
4009
4010 /*
	 * On bad return codes from the wire, just return whatever is already
	 * in the buffer.
4013 */
4014
4015 /* Make sure a response packet didn't arrive after a bad ACK. */
4016 if (fcb->abuf_valid == 0)
4017 return IEEE1394_RCODE_COMPLETE;
4018
4019 rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12;
4020
4021 if (rcode) {
4022 (*ab->ab_cb)(ab, rcode);
4023 goto cleanup;
4024 }
4025
4026 if ((ab->ab_retlen + pkt->fp_iov[0].iov_len) > ab->ab_length) {
4027 memcpy(((char *)ab->ab_data + ab->ab_retlen),
4028 pkt->fp_iov[0].iov_base, (ab->ab_length - ab->ab_retlen));
4029 ab->ab_retlen = ab->ab_length;
4030 } else {
4031 memcpy(((char *)ab->ab_data + ab->ab_retlen),
4032 pkt->fp_iov[0].iov_base, 4);
4033 ab->ab_retlen += 4;
4034 }
4035 /* Still more, loop and read 4 more bytes. */
4036 if (ab->ab_retlen < ab->ab_length) {
4037 memset(&newpkt, 0, sizeof(newpkt));
4038
4039 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
4040 lo = (ab->ab_addr & 0x00000000ffffffffULL) + ab->ab_retlen;
4041
4042 newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
4043 newpkt.fp_hlen = 12;
4044 newpkt.fp_dlen = 0;
4045 newpkt.fp_hdr[1] =
4046 ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
4047 newpkt.fp_hdr[2] = lo;
4048 newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
4049 (newpkt.fp_tcode << 4);
4050
4051 newpkt.fp_statusarg = fcb;
4052 newpkt.fp_statuscb = fwohci_read_resp;
4053
4054 /*
4055 * Bad return code. Just give up and return what's
4056 * come in now.
4057 */
4058 rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
4059 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0,
4060 fwohci_read_multi_resp, fcb);
4061 if (rv)
4062 (*ab->ab_cb)(ab, -1);
4063 else {
4064 rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt);
4065 if (rv) {
4066 fwohci_handler_set(sc,
4067 IEEE1394_TCODE_READ_RESP_QUAD,
4068 ab->ab_req->sc1394_node_id, sc->sc_tlabel,
4069 0, NULL, NULL);
4070 (*ab->ab_cb)(ab, -1);
4071 } else {
4072 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
4073 fcb->count++;
4074 return IEEE1394_RCODE_COMPLETE;
4075 }
4076 }
4077 } else
4078 (*ab->ab_cb)(ab, IEEE1394_RCODE_COMPLETE);
4079
4080 cleanup:
4081 /* Can't get here unless ab_cb has been called. */
4082 fcb->abuf_valid = 0;
4083 if (fcb->count == 0)
4084 free(fcb, M_DEVBUF);
4085 return IEEE1394_RCODE_COMPLETE;
4086 }
4087
4088 static int
4089 fwohci_write_ack(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
4090 {
4091 struct ieee1394_abuf *ab = arg;
4092 u_int16_t status;
4093
4094
4095 status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK;
4096 if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
4097 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))
4098 DPRINTF(("Got status packet: 0x%02x\n",
4099 (unsigned int)status));
4100
4101 /* No callback means this level should free the buffers. */
4102 if (ab->ab_cb)
4103 (*ab->ab_cb)(ab, status);
4104 else {
4105 if (ab->ab_data)
4106 free(ab->ab_data, M_1394DATA);
4107 free(ab, M_1394DATA);
4108 }
4109 return IEEE1394_RCODE_COMPLETE;
4110 }
4111
4112 static int
4113 fwohci_inreg(struct ieee1394_abuf *ab, int allow)
4114 {
4115 struct ieee1394_softc *sc = ab->ab_req;
4116 struct fwohci_softc *psc =
4117 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
4118 u_int32_t high, lo;
4119 int rv;
4120
4121 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
4122 lo = (ab->ab_addr & 0x00000000ffffffffULL);
4123
4124 rv = 0;
4125 switch (ab->ab_tcode) {
4126 case IEEE1394_TCODE_READ_REQ_QUAD:
4127 case IEEE1394_TCODE_WRITE_REQ_QUAD:
4128 if (ab->ab_cb)
4129 rv = fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0,
4130 fwohci_parse_input, ab);
4131 else
4132 fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0, NULL,
4133 NULL);
4134 break;
4135 case IEEE1394_TCODE_READ_REQ_BLOCK:
4136 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
4137 if (allow) {
4138 if (ab->ab_cb) {
4139 rv = fwohci_handler_set(psc, ab->ab_tcode,
4140 high, lo, ab->ab_length,
4141 fwohci_parse_input, ab);
4142 if (rv)
4143 fwohci_handler_set(psc, ab->ab_tcode,
4144 high, lo, ab->ab_length, NULL,
4145 NULL);
4146 ab->ab_subok = 1;
4147 } else
4148 fwohci_handler_set(psc, ab->ab_tcode, high, lo,
4149 ab->ab_length, NULL, NULL);
4150 } else {
4151 if (ab->ab_cb)
4152 rv = fwohci_handler_set(psc, ab->ab_tcode, high,
4153 lo, 0, fwohci_parse_input, ab);
4154 else
4155 fwohci_handler_set(psc, ab->ab_tcode, high, lo,
4156 0, NULL, NULL);
4157 }
4158 break;
4159 default:
4160 DPRINTF(("Invalid registration tcode: %d\n", ab->ab_tcode));
4161 return -1;
4162 break;
4163 }
4164 return rv;
4165 }
4166
4167 static int
4168 fwohci_unreg(struct ieee1394_abuf *ab, int allow)
4169 {
4170 void *save;
4171 int rv;
4172
4173 save = ab->ab_cb;
4174 ab->ab_cb = NULL;
4175 rv = fwohci_inreg(ab, allow);
4176 ab->ab_cb = save;
4177 return rv;
4178 }
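/*
 * A minimal sketch of the inreg/unreg pairing described in the comment
 * block above: register a block-write region (e.g. a status buffer a
 * remote node writes into once), then tear it down again.  Illustrative
 * only and not compiled; the "example_*" functions, the 16-byte buffer
 * and the register-space address are assumptions of the sketch.
 */
#if 0
static void
example_status_cb(struct ieee1394_abuf *ab, int rcode)
{
	/* The remote node wrote ab->ab_retlen bytes into ab->ab_data. */
	wakeup(ab);
}

static int
example_register_status(struct ieee1394_softc *node_sc, u_int32_t *buf)
{
	struct ieee1394_abuf *ab;

	ab = malloc(sizeof(*ab), M_1394DATA, M_WAITOK|M_ZERO);
	ab->ab_req = node_sc;
	ab->ab_tcode = IEEE1394_TCODE_WRITE_REQ_BLOCK;
	ab->ab_addr = 0x0000400000000000ULL;	/* example 48-bit offset */
	ab->ab_length = 16;			/* buf must hold 16 bytes */
	ab->ab_data = buf;
	ab->ab_cb = example_status_cb;

	/* 0: do not allow sub-region (partial block) access. */
	return fwohci_inreg(ab, 0);
}

static void
example_unregister_status(struct ieee1394_abuf *ab)
{
	fwohci_unreg(ab, 0);
	free(ab, M_1394DATA);
}
#endif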
4179
4180 static int
4181 fwohci_parse_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
4182 {
4183 struct ieee1394_abuf *ab = (struct ieee1394_abuf *)arg;
4184 u_int64_t addr;
4185 u_int8_t *cur;
4186 int i, count, ret;
4187
4188 ab->ab_tcode = (pkt->fp_hdr[0] >> 4) & 0xf;
4189 ab->ab_tlabel = (pkt->fp_hdr[0] >> 10) & 0x3f;
4190 addr = (((u_int64_t)(pkt->fp_hdr[1] & 0xffff) << 32) | pkt->fp_hdr[2]);
4191
4192 /* Make sure it's always 0 in case this gets reused multiple times. */
4193 ab->ab_retlen = 0;
4194
4195 switch (ab->ab_tcode) {
4196 case IEEE1394_TCODE_READ_REQ_QUAD:
4197 ab->ab_retlen = 4;
		/* Responses (if required) will come from the callback code. */
4199 ret = -1;
4200 break;
4201 case IEEE1394_TCODE_READ_REQ_BLOCK:
4202 ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff;
4203 if (ab->ab_subok) {
4204 if ((addr + ab->ab_retlen) >
4205 (ab->ab_addr + ab->ab_length))
4206 return IEEE1394_RCODE_ADDRESS_ERROR;
4207 } else
4208 if (ab->ab_retlen != ab->ab_length)
4209 return IEEE1394_RCODE_ADDRESS_ERROR;
		/* Responses (if required) will come from the callback code. */
4211 ret = -1;
4212 break;
4213 case IEEE1394_TCODE_WRITE_REQ_QUAD:
4214 ab->ab_retlen = 4;
4215 /* Fall through. */
4216
4217 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
4218 if (!ab->ab_retlen)
4219 ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff;
4220 if (ab->ab_subok) {
4221 if ((addr + ab->ab_retlen) >
4222 (ab->ab_addr + ab->ab_length))
4223 return IEEE1394_RCODE_ADDRESS_ERROR;
4224 } else
4225 if (ab->ab_retlen > ab->ab_length)
4226 return IEEE1394_RCODE_ADDRESS_ERROR;
4227
4228 if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD)
4229 ab->ab_data[0] = pkt->fp_hdr[3];
4230 else {
4231 count = 0;
4232 cur = (u_int8_t *)ab->ab_data + (addr - ab->ab_addr);
4233 for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) {
4234 memcpy(cur, pkt->fp_iov[i].iov_base,
4235 pkt->fp_iov[i].iov_len);
4236 cur += pkt->fp_iov[i].iov_len;
4237 count += pkt->fp_iov[i].iov_len;
4238 }
4239 if (ab->ab_retlen != count)
4240 panic("Packet claims %d length "
4241 "but only %d bytes returned\n",
4242 ab->ab_retlen, count);
4243 }
4244 ret = IEEE1394_RCODE_COMPLETE;
4245 break;
4246 default:
4247 panic("Got a callback for a tcode that wasn't requested: %d",
4248 ab->ab_tcode);
4249 break;
4250 }
4251 if (ab->ab_cb) {
4252 ab->ab_retaddr = addr;
4253 ab->ab_cb(ab, IEEE1394_RCODE_COMPLETE);
4254 }
4255 return ret;
4256 }
4257
4258 static int
4259 fwohci_submatch(struct device *parent, struct cfdata *cf, void *aux)
4260 {
4261 struct ieee1394_attach_args *fwa = aux;
4262
4263 /* Both halves must be filled in for a match. */
4264 if ((cf->fwbuscf_idhi == FWBUS_UNK_IDHI &&
4265 cf->fwbuscf_idlo == FWBUS_UNK_IDLO) ||
4266 (cf->fwbuscf_idhi == ntohl(*((u_int32_t *)&fwa->uid[0])) &&
4267 cf->fwbuscf_idlo == ntohl(*((u_int32_t *)&fwa->uid[4]))))
4268 return (config_match(parent, cf, aux));
4269 return 0;
4270 }
4271
4272 int
4273 fwohci_detach(struct fwohci_softc *sc, int flags)
4274 {
4275 int rv = 0;
4276
4277 if (sc->sc_sc1394.sc1394_if != NULL)
4278 rv = config_detach(sc->sc_sc1394.sc1394_if, flags);
4279 if (rv != 0)
4280 return (rv);
4281
4282 callout_stop(&sc->sc_selfid_callout);
4283
4284 if (sc->sc_powerhook != NULL)
4285 powerhook_disestablish(sc->sc_powerhook);
4286 if (sc->sc_shutdownhook != NULL)
4287 shutdownhook_disestablish(sc->sc_shutdownhook);
4288
4289 return (rv);
4290 }
4291
4292 int
4293 fwohci_activate(struct device *self, enum devact act)
4294 {
4295 struct fwohci_softc *sc = (struct fwohci_softc *)self;
4296 int s, rv = 0;
4297
4298 s = splhigh();
4299 switch (act) {
4300 case DVACT_ACTIVATE:
4301 rv = EOPNOTSUPP;
4302 break;
4303
4304 case DVACT_DEACTIVATE:
4305 if (sc->sc_sc1394.sc1394_if != NULL)
4306 rv = config_deactivate(sc->sc_sc1394.sc1394_if);
4307 break;
4308 }
4309 splx(s);
4310
4311 return (rv);
4312 }
4313
4314 #ifdef FW_DEBUG
4315 static void
4316 fwohci_show_intr(struct fwohci_softc *sc, u_int32_t intmask)
4317 {
4318
4319 printf("%s: intmask=0x%08x:", sc->sc_sc1394.sc1394_dev.dv_xname,
4320 intmask);
4321 if (intmask & OHCI_Int_CycleTooLong)
4322 printf(" CycleTooLong");
4323 if (intmask & OHCI_Int_UnrecoverableError)
4324 printf(" UnrecoverableError");
4325 if (intmask & OHCI_Int_CycleInconsistent)
4326 printf(" CycleInconsistent");
4327 if (intmask & OHCI_Int_BusReset)
4328 printf(" BusReset");
4329 if (intmask & OHCI_Int_SelfIDComplete)
4330 printf(" SelfIDComplete");
4331 if (intmask & OHCI_Int_LockRespErr)
4332 printf(" LockRespErr");
4333 if (intmask & OHCI_Int_PostedWriteErr)
4334 printf(" PostedWriteErr");
4335 if (intmask & OHCI_Int_ReqTxComplete)
4336 printf(" ReqTxComplete(0x%04x)",
4337 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST,
4338 OHCI_SUBREG_ContextControlClear));
4339 if (intmask & OHCI_Int_RespTxComplete)
4340 printf(" RespTxComplete(0x%04x)",
4341 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
4342 OHCI_SUBREG_ContextControlClear));
4343 if (intmask & OHCI_Int_ARRS)
4344 printf(" ARRS(0x%04x)",
4345 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
4346 OHCI_SUBREG_ContextControlClear));
4347 if (intmask & OHCI_Int_ARRQ)
4348 printf(" ARRQ(0x%04x)",
4349 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST,
4350 OHCI_SUBREG_ContextControlClear));
4351 if (intmask & OHCI_Int_IsochRx)
4352 printf(" IsochRx(0x%08x)",
4353 OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear));
4354 if (intmask & OHCI_Int_IsochTx)
4355 printf(" IsochTx(0x%08x)",
4356 OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear));
4357 if (intmask & OHCI_Int_RQPkt)
4358 printf(" RQPkt(0x%04x)",
4359 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST,
4360 OHCI_SUBREG_ContextControlClear));
4361 if (intmask & OHCI_Int_RSPkt)
4362 printf(" RSPkt(0x%04x)",
4363 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
4364 OHCI_SUBREG_ContextControlClear));
4365 printf("\n");
4366 }
4367
4368 static void
4369 fwohci_show_phypkt(struct fwohci_softc *sc, u_int32_t val)
4370 {
4371 u_int8_t key, phyid;
4372
4373 key = (val & 0xc0000000) >> 30;
4374 phyid = (val & 0x3f000000) >> 24;
4375 printf("%s: PHY packet from %d: ",
4376 sc->sc_sc1394.sc1394_dev.dv_xname, phyid);
4377 switch (key) {
4378 case 0:
4379 printf("PHY Config:");
4380 if (val & 0x00800000)
4381 printf(" ForceRoot");
4382 if (val & 0x00400000)
4383 printf(" Gap=%x", (val & 0x003f0000) >> 16);
4384 printf("\n");
4385 break;
4386 case 1:
4387 printf("Link-on\n");
4388 break;
4389 case 2:
4390 printf("SelfID:");
4391 if (val & 0x00800000) {
4392 printf(" #%d", (val & 0x00700000) >> 20);
4393 } else {
4394 if (val & 0x00400000)
4395 printf(" LinkActive");
4396 printf(" Gap=%x", (val & 0x003f0000) >> 16);
4397 printf(" Spd=S%d", 100 << ((val & 0x0000c000) >> 14));
4398 if (val & 0x00000800)
4399 printf(" Cont");
4400 if (val & 0x00000002)
4401 printf(" InitiateBusReset");
4402 }
4403 if (val & 0x00000001)
4404 printf(" +");
4405 printf("\n");
4406 break;
4407 default:
4408 printf("unknown: 0x%08x\n", val);
4409 break;
4410 }
4411 }
4412 #endif /* FW_DEBUG */
4413
4414 #if 0
4415 void fwohci_dumpreg(struct ieee1394_softc *, struct fwiso_regdump *);
4416
4417 void
4418 fwohci_dumpreg(struct ieee1394_softc *isc, struct fwiso_regdump *fr)
4419 {
4420 struct fwohci_softc *sc = (struct fwohci_softc *)isc;
4421 #if 0
4422 u_int32_t val;
4423
4424 printf("%s: dump reg\n", isc->sc1394_dev.dv_xname);
4425 printf("\tNodeID reg 0x%08x\n",
4426 OHCI_CSR_READ(sc, OHCI_REG_NodeId));
4427 val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4428 printf("\tIsoCounter 0x%08x, %d %d %d", val,
4429 (val >> 25) & 0xfe, (val >> 12) & 0x1fff, val & 0xfff);
4430 val = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet);
4431 printf(" IntMask 0x%08x, %s\n", val,
4432 val & OHCI_Int_IsochTx ? "isoTx" : "");
4433
4434 val = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet);
4435 printf("\tIT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
4436 OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr),
4437 val,
4438 val & OHCI_CTXCTL_RUN ? " run" : "",
4439 val & OHCI_CTXCTL_WAKE ? " wake" : "",
4440 val & OHCI_CTXCTL_DEAD ? " dead" : "",
4441 val & OHCI_CTXCTL_ACTIVE ? " active" : "");
4442 #endif
4443
4444 fr->fr_nodeid = OHCI_CSR_READ(sc, OHCI_REG_NodeId);
4445 fr->fr_isocounter = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4446 fr->fr_intmask = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet);
	fr->fr_it0_commandptr =
	    OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr);
	fr->fr_it0_contextctrl =
	    OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet);
4449
4450
4451 }
4452 #endif
4453
4454
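/*
 * u_int16_t fwohci_cycletimer(struct fwohci_softc *sc)
 *
 * Returns a 16-bit snapshot of the isochronous cycle timer.  In the OHCI
 * IsochronousCycleTimer register cycleSeconds occupies bits 31-25,
 * cycleCount bits 24-12 and cycleOffset bits 11-0, so the value returned
 * here carries the 13-bit cycleCount in its low bits with the low three
 * bits of cycleSeconds above it; this matches the "sec:count" decoding
 * used by the FWOHCI_WAIT_DEBUG printouts elsewhere in this file.
 */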
4455 u_int16_t
4456 fwohci_cycletimer(struct fwohci_softc *sc)
4457 {
4458 u_int32_t reg;
4459
4460 reg = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4461
4462 return (reg >> 12)&0xffff;
4463 }
4464
4465
4466 u_int16_t
4467 fwohci_it_cycletimer(ieee1394_it_tag_t it)
4468 {
4469 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
4470
4471 return fwohci_cycletimer(itc->itc_sc);
4472 }
4473
4474
4475
4476
4477
4478 /*
 * Return value: a positive value is the number of DMA buffer segments;
 * a negative value indicates an error.  Never returns zero.
4481 */
4482 static int
4483 fwohci_misc_dmabuf_alloc(bus_dma_tag_t dmat, int dsize, int segno,
4484 bus_dma_segment_t *segp, bus_dmamap_t *dmapp, void **mapp,
4485 const char *xname)
4486 {
4487 int nsegs;
4488 int error;
4489
	printf("fwohci_misc_dmabuf_alloc: dsize %d segno %d\n", dsize, segno);
4491
4492 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0,
4493 segp, segno, &nsegs, 0)) != 0) {
4494 printf("%s: unable to allocate descriptor buffer, error = %d\n",
4495 xname, error);
4496 goto fail_0;
4497 }
4498
	DPRINTF(("fwohci_misc_dmabuf_alloc: %d segment[s]\n", nsegs));
4500
4501 if ((error = bus_dmamem_map(dmat, segp, nsegs, dsize, (caddr_t *)mapp,
4502 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
4503 printf("%s: unable to map descriptor buffer, error = %d\n",
4504 xname, error);
4505 goto fail_1;
4506 }
4507
	DPRINTF(("fwohci_misc_dmabuf_alloc: %s map ok\n", xname));
4509
4510 #ifdef FWOHCI_DEBUG
4511 {
4512 int loop;
4513
4514 for (loop = 0; loop < nsegs; ++loop) {
4515 printf("\t%.2d: 0x%lx - 0x%lx\n", loop,
4516 (long)segp[loop].ds_addr,
4517 (long)segp[loop].ds_addr + segp[loop].ds_len - 1);
4518 }
4519 }
4520 #endif /* FWOHCI_DEBUG */
4521
4522 if ((error = bus_dmamap_create(dmat, dsize, nsegs, dsize,
4523 0, BUS_DMA_WAITOK, dmapp)) != 0) {
4524 printf("%s: unable to create descriptor buffer DMA map, "
4525 "error = %d\n", xname, error);
4526 goto fail_2;
4527 }
4528
	DPRINTF(("fwohci_misc_dmabuf_alloc: bus_dmamap_create success\n"));
4530
4531 if ((error = bus_dmamap_load(dmat, *dmapp, *mapp, dsize, NULL,
4532 BUS_DMA_WAITOK)) != 0) {
4533 printf("%s: unable to load descriptor buffer DMA map, "
4534 "error = %d\n", xname, error);
4535 goto fail_3;
4536 }
4537
	DPRINTF(("fwohci_misc_dmabuf_alloc: bus_dmamap_load success\n"));
4539
4540 return nsegs;
4541
4542 fail_3:
4543 bus_dmamap_destroy(dmat, *dmapp);
4544 fail_2:
4545 bus_dmamem_unmap(dmat, *mapp, dsize);
4546 fail_1:
4547 bus_dmamem_free(dmat, segp, nsegs);
4548 fail_0:
	return -error;		/* callers test for a negative value on failure */
4550 }
4551
4552
4553 static void
4554 fwohci_misc_dmabuf_free(bus_dma_tag_t dmat, int dsize, int nsegs,
4555 bus_dma_segment_t *segp, bus_dmamap_t *dmapp, caddr_t map)
4556 {
4557 bus_dmamap_destroy(dmat, *dmapp);
4558 bus_dmamem_unmap(dmat, map, dsize);
4559 bus_dmamem_free(dmat, segp, nsegs);
4560 }
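/*
 * A minimal sketch of how the two helpers above pair up, modeled on the
 * way fwohci_ir_buf_setup() uses them below.  Illustrative only and not
 * compiled; "example_dmabuf" and the 4096-byte size are assumptions of
 * the sketch.
 */
#if 0
static int
example_dmabuf(struct fwohci_softc *sc)
{
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	void *map;
	int nsegs;

	nsegs = fwohci_misc_dmabuf_alloc(sc->sc_dmat, 4096, 1, &seg,
	    &dmamap, &map, sc->sc_sc1394.sc1394_dev.dv_xname);
	if (nsegs < 0)
		return -1;

	/* ... use the buffer through "map" and dmamap->dm_segs ... */

	fwohci_misc_dmabuf_free(sc->sc_dmat, 4096, nsegs, &seg, &dmamap,
	    (caddr_t)map);
	return 0;
}
#endif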
4561
4562
4563
4564
4565 /*
4566 * Isochronous receive service
4567 */
4568
4569 /*
4570 * static struct fwohci_ir_ctx *
4571 * fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm,
4572 * int bufnum, int maxsize, int flags)
4573 */
4574 static struct fwohci_ir_ctx *
4575 fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm,
4576 int bufnum, int maxsize, int flags)
4577 {
4578 struct fwohci_ir_ctx *irc;
4579 int i;
4580
	printf("fwohci_ir_ctx_construct(%s, %d, %d, %x, %d, %d)\n",
4582 sc->sc_sc1394.sc1394_dev.dv_xname, no, ch, tagbm, bufnum, maxsize);
4583
4584 if ((irc = malloc(sizeof(*irc), M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
4585 return NULL;
4586 }
4587
4588 irc->irc_sc = sc;
4589
4590 irc->irc_num = no;
4591 irc->irc_status = 0;
4592
4593 irc->irc_channel = ch;
4594 irc->irc_tagbm = tagbm;
4595
4596 irc->irc_desc_num = bufnum;
4597
4598 irc->irc_flags = flags;
4599
	/* add space for the 8-byte packet header */
	maxsize += 8;
	/* round the buffer size up to the next power of two (minimum 32) */
4603 for (i = 32; i < maxsize; i <<= 1);
4604 printf("fwohci_ir_ctx_construct: maxsize %d => %d\n",
4605 maxsize, i);
4606
4607 maxsize = i;
4608
4609 irc->irc_maxsize = maxsize;
4610 irc->irc_buf_totalsize = bufnum * maxsize;
4611
4612 if (fwohci_ir_buf_setup(irc)) {
4613 /* cannot alloc descriptor */
4614 return NULL;
4615 }
4616
4617 irc->irc_readtop = irc->irc_desc_map;
4618 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1;
4619 irc->irc_savedbranch = irc->irc_writeend->fd_branch;
4620 irc->irc_writeend->fd_branch = 0;
4621 /* sync */
4622
4623 if (fwohci_ir_stop(irc) || fwohci_ir_init(irc)) {
4624 return NULL;
4625 }
4626
4627 irc->irc_status |= IRC_STATUS_READY;
4628
4629 return irc;
4630 }
4631
4632
4633
4634 /*
4635 * static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4636 *
 * This function releases all DMA buffers and frees the context itself.
4638 */
4639 static void
4640 fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4641 {
4642 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat, irc->irc_buf_totalsize,
4643 irc->irc_buf_nsegs, irc->irc_buf_segs,
4644 &irc->irc_buf_dmamap, (caddr_t)irc->irc_buf);
4645 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4646 irc->irc_desc_size,
4647 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4648 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map);
4649
4650 free(irc, M_DEVBUF);
4651 }
4652
4653
4654
4655
4656 /*
4657 * static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc)
4658 *
4659 * Allocates descriptors for context DMA dedicated for
4660 * isochronous receive.
4661 *
 * This function returns 0 (zero) if it succeeds.  Otherwise it
 * returns a negative value.
4664 */
4665 static int
4666 fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc)
4667 {
4668 int nsegs;
4669 struct fwohci_desc *fd;
4670 u_int32_t branch;
4671 int bufno = 0; /* DMA segment */
4672 bus_size_t bufused = 0; /* offset in a DMA segment */
4673
4674 irc->irc_desc_size = irc->irc_desc_num * sizeof(struct fwohci_desc);
4675
4676 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat,
4677 irc->irc_desc_size, 1, &irc->irc_desc_seg, &irc->irc_desc_dmamap,
4678 (void **)&irc->irc_desc_map,
4679 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname);
4680
4681 if (nsegs < 0) {
		printf("fwohci_ir_buf_setup: cannot allocate descriptors\n");
4683 return -1;
4684 }
4685 irc->irc_desc_nsegs = nsegs;
4686
4687 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat,
4688 irc->irc_buf_totalsize, 16, irc->irc_buf_segs,
4689 &irc->irc_buf_dmamap, (void **)&irc->irc_buf,
4690 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname);
4691
4692 if (nsegs < 0) {
		printf("fwohci_ir_buf_setup: cannot allocate DMA buffer\n");
4694 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4695 irc->irc_desc_size,
4696 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4697 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map);
4698 return -1;
4699 }
4700 irc->irc_buf_nsegs = nsegs;
4701
4702 branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr
4703 + sizeof(struct fwohci_desc);
4704 bufno = 0;
4705 bufused = 0;
4706
4707 for (fd = irc->irc_desc_map;
4708 fd < irc->irc_desc_map + irc->irc_desc_num; ++fd) {
4709 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_LAST
4710 | OHCI_DESC_STATUS | OHCI_DESC_BRANCH;
4711 if (irc->irc_flags & IEEE1394_IR_SHORTDELAY) {
4712 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
4713 }
4714 #if 0
4715 if ((fd - irc->irc_desc_map) % 64 == 0) {
4716 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
4717 }
4718 #endif
4719 fd->fd_reqcount = irc->irc_maxsize;
4720 fd->fd_status = fd->fd_rescount = 0;
4721
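		/*
		 * The low four bits of the branch word hold the Z value
		 * (the number of descriptors in the next block); one
		 * INPUT_LAST descriptor per receive buffer gives Z = 1.
		 */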
4722 fd->fd_branch = branch | 0x01;
4723 branch += sizeof(struct fwohci_desc);
4724
		/* physical address of the data buffer for this descriptor */
4726 fd->fd_data =
4727 (u_int32_t)((irc->irc_buf_segs[bufno].ds_addr + bufused));
4728 bufused += irc->irc_maxsize;
4729 if (bufused > irc->irc_buf_segs[bufno].ds_len) {
4730 bufused = 0;
4731 if (++bufno == irc->irc_buf_nsegs) {
4732 /* fail */
4733 printf("fwohci_ir_buf_setup fail\n");
4734
4735 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4736 irc->irc_desc_size,
4737 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4738 &irc->irc_desc_dmamap,
4739 (caddr_t)irc->irc_desc_map);
4740 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4741 irc->irc_buf_totalsize,
4742 irc->irc_buf_nsegs, irc->irc_buf_segs,
4743 &irc->irc_buf_dmamap,
4744 (caddr_t)irc->irc_buf);
4745 return -1;
4746 }
4747 }
4748
4749 #ifdef FWOHCI_DEBUG
4750 if (fd < irc->irc_desc_map + 4
4751 || (fd > irc->irc_desc_map + irc->irc_desc_num - 4)) {
4752 printf("fwohci_ir_buf_setup: desc %d %p buf %08x"
4753 " size %d branch %08x\n",
4754 fd - irc->irc_desc_map, fd, fd->fd_data,
4755 fd->fd_reqcount, fd->fd_branch);
4756 }
4757 #endif /* FWOHCI_DEBUG */
4758 }
4759
4760 --fd;
4761 fd->fd_branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr | 1;
4762 DPRINTF(("fwohci_ir_buf_setup: desc %d %p buf %08x size %d branch %08x\n",
4763 fd - irc->irc_desc_map, fd, fd->fd_data, fd->fd_reqcount,
4764 fd->fd_branch));
4765
4766 return 0;
4767 }
4768
4769
4770
4771 /*
 * static int fwohci_ir_init(struct fwohci_ir_ctx *irc)
 *
 * This function initializes the DMA engine.
4775 */
4776 static int
4777 fwohci_ir_init(struct fwohci_ir_ctx *irc)
4778 {
4779 struct fwohci_softc *sc = irc->irc_sc;
4780 int n = irc->irc_num;
4781 u_int32_t ctxmatch;
4782
4783 ctxmatch = irc->irc_channel & IEEE1394_ISO_CHANNEL_MASK;
4784
4785 if (irc->irc_channel & IEEE1394_ISO_CHANNEL_ANY) {
4786 OHCI_SYNC_RX_DMA_WRITE(sc, n,
4787 OHCI_SUBREG_ContextControlSet,
4788 OHCI_CTXCTL_RX_MULTI_CHAN_MODE);
4789
4790 /* Receive all the isochronous channels */
4791 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet, 0xffffffff);
4792 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet, 0xffffffff);
4793 ctxmatch = 0;
4794 }
4795
4796 ctxmatch |= ((irc->irc_tagbm & 0x0f) << OHCI_CTXMATCH_TAG_BITPOS);
4797 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch, ctxmatch);
4798
4799 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear,
4800 OHCI_CTXCTL_RX_BUFFER_FILL | OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE);
4801 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet,
4802 OHCI_CTXCTL_RX_ISOCH_HEADER);
4803
4804 printf("fwohci_ir_init\n");
4805
4806 return 0;
4807 }
4808
4809
4810 /*
4811 * static int fwohci_ir_start(struct fwohci_ir_ctx *irc)
4812 *
 * This function starts the DMA engine.  It must be called after
 * fwohci_ir_init(), and only while the active bit of the context
 * control register is cleared; this function does not check that.
4816 */
4817 static int
4818 fwohci_ir_start(struct fwohci_ir_ctx *irc)
4819 {
4820 struct fwohci_softc *sc = irc->irc_sc;
4821 int startidx = irc->irc_readtop - irc->irc_desc_map;
4822 u_int32_t startaddr;
4823
4824 startaddr = irc->irc_desc_dmamap->dm_segs[0].ds_addr
4825 + sizeof(struct fwohci_desc)*startidx;
4826
4827 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num, OHCI_SUBREG_CommandPtr,
4828 startaddr | 1);
4829 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear,
4830 (1 << irc->irc_num));
4831 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num,
4832 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
4833
4834 printf("fwohci_ir_start: CmdPtr %08x Ctx %08x startidx %d\n",
4835 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_CommandPtr),
4836 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_ContextControlSet),
4837 startidx);
4838
4839 irc->irc_status &= ~IRC_STATUS_READY;
4840 irc->irc_status |= IRC_STATUS_RUN;
4841
4842 if ((irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) == 0) {
4843 irc->irc_status |= IRC_STATUS_RECEIVE;
4844 }
4845
4846 return 0;
4847 }
4848
4849
4850
4851 /*
4852 * static int fwohci_ir_stop(struct fwohci_ir_ctx *irc)
4853 *
 * This function stops the DMA engine.
4855 */
4856 static int
4857 fwohci_ir_stop(struct fwohci_ir_ctx *irc)
4858 {
4859 struct fwohci_softc *sc = irc->irc_sc;
4860 int i;
4861
4862 printf("fwohci_ir_stop\n");
4863
4864 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num,
4865 OHCI_SUBREG_ContextControlClear,
4866 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD);
4867
4868 i = 0;
4869 while (OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4870 OHCI_SUBREG_ContextControlSet) & OHCI_CTXCTL_ACTIVE) {
4871 #if 0
4872 u_int32_t reg = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4873 OHCI_SUBREG_ContextControlClear);
4874
4875 printf("%s: %d intr IR_CommandPtr 0x%08x "
4876 "ContextCtrl 0x%08x%s%s%s%s\n",
4877 sc->sc_sc1394.sc1394_dev.dv_xname, i,
4878 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4879 OHCI_SUBREG_CommandPtr),
4880 reg,
4881 reg & OHCI_CTXCTL_RUN ? " run" : "",
4882 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
4883 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
4884 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
4885 #endif
4886 if (i > 20) {
4887 printf("fwohci_ir_stop: %s does not stop\n",
4888 sc->sc_sc1394.sc1394_dev.dv_xname);
4889 return 1;
4890 }
4891 DELAY(10);
4892 }
4893
4894 irc->irc_status &= ~IRC_STATUS_RUN;
4895
4896 return 0;
4897 }
4898
4899
4900
4901
4902
4903
4904 static void
4905 fwohci_ir_intr(struct fwohci_softc *sc, struct fwohci_ir_ctx *irc)
4906 {
4907 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
4908 u_int32_t cmd, ctx;
4909 int idx;
4910 struct fwohci_desc *fd;
4911
4912 sc->sc_isocnt.ev_count++;
4913
4914 if (!(irc->irc_status & IRC_STATUS_RUN)) {
4915 printf("fwohci_ir_intr: not running\n");
4916 return;
4917 }
4918
4919 bus_dmamap_sync(sc->sc_dmat, irc->irc_desc_dmamap,
4920 0, irc->irc_desc_size, BUS_DMASYNC_PREREAD);
4921
4922 ctx = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4923 OHCI_SUBREG_ContextControlSet);
4924
4925 cmd = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4926 OHCI_SUBREG_CommandPtr);
4927
4928 #define OHCI_CTXCTL_RUNNING (OHCI_CTXCTL_RUN|OHCI_CTXCTL_ACTIVE)
4929 #define OHCI_CTXCTL_RUNNING_MASK (OHCI_CTXCTL_RUNNING|OHCI_CTXCTL_DEAD)
4930
	idx = (cmd & 0xfffffff8)
	    - (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr;
4932 idx /= sizeof(struct fwohci_desc);
4933
4934 if ((ctx & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUNNING) {
4935 if (irc->irc_waitchan != NULL) {
4936 DPRINTF(("fwohci_ir_intr: wakeup "
4937 "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n",
4938 irc->irc_num, cmd, ctx, idx));
4939 #ifdef FWOHCI_WAIT_DEBUG
4940 irc->irc_cycle[1] = fwohci_cycletimer(irc->irc_sc);
4941 #endif
4942 wakeup((void *)irc->irc_waitchan);
4943 }
4944 selwakeup(&irc->irc_sel);
4945 return;
4946 }
4947
4948 fd = irc->irc_desc_map + idx;
4949
4950 printf("fwohci_ir_intr: %s error "
4951 "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n", xname,
4952 irc->irc_num, cmd, ctx, idx);
4953 printf("\tfd flag %x branch %x stat %x rescnt %x total pkt %d\n",
4954 fd->fd_flags, fd->fd_branch, fd->fd_status,fd->fd_rescount,
4955 irc->irc_pktcount);
4956 }
4957
4958
4959
4960
4961 /*
4962 * static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc)
4963 *
 * This function returns the number of descriptors that contain data.
4965 */
4966 static int
4967 fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc)
4968 {
4969 struct fwohci_desc *fd = irc->irc_readtop;
4970 int i = 0;
4971
4972 /* XXX SYNC */
4973 while (fd->fd_status != 0) {
4974 if (fd == irc->irc_readtop && i > 0) {
4975 printf("descriptor filled %d at %d\n", i,
4976 irc->irc_pktcount);
4977 #ifdef FWOHCI_WAIT_DEBUG
4978 irc->irc_cycle[2] = fwohci_cycletimer(irc->irc_sc);
4979 printf("cycletimer %d:%d %d:%d %d:%d\n",
4980 irc->irc_cycle[0]>>13, irc->irc_cycle[0]&0x1fff,
4981 irc->irc_cycle[1]>>13, irc->irc_cycle[1]&0x1fff,
4982 irc->irc_cycle[2]>>13, irc->irc_cycle[2]&0x1fff);
4983 #endif
4984
4985 break;
4986 }
4987
4988 ++i;
4989 ++fd;
4990 if (fd == irc->irc_desc_map + irc->irc_desc_num) {
4991 fd = irc->irc_desc_map;
4992 }
4993
4994 }
4995
4996 return i;
4997 }
4998
4999
5000
5001
5002 /*
5003 * int fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag,
5004 * struct uio *uio, int headoffs, int flags)
5005 *
5006 * This function reads data from fwohci's isochronous receive
5007 * buffer.
5008 */
5009 int
5010 fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag, struct uio *uio,
5011 int headoffs, int flags)
5012 {
5013 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5014 int packetnum;
5015 int copylen, hdrshim, fwisohdrsiz;
5016 struct fwohci_desc *fd, *fdprev;
5017 u_int8_t *data;
5018 int status = 0;
5019 u_int32_t tmpbranch;
5020 int pktcount_prev = irc->irc_pktcount;
5021 #ifdef FW_DEBUG
5022 int totalread = 0;
5023 #endif
5024
5025 if (irc->irc_status & IRC_STATUS_READY) {
5026 printf("fwohci_ir_read: starting iso read engine\n");
5027 fwohci_ir_start(irc);
5028 }
5029
5030 packetnum = fwohci_ir_ctx_packetnum(irc);
5031
5032 DPRINTF(("fwohci_ir_read resid %d DMA buf %d\n",
5033 uio->uio_resid, packetnum));
5034
5035 if (packetnum == 0) {
5036 return EAGAIN;
5037 }
5038
5039 #ifdef USEDRAIN
5040 if (packetnum > irc->irc_desc_num - irc->irc_desc_num/4) {
5041 packetnum -= fwohci_ir_ctx_drain(irc);
5042 if (irc->irc_pktcount != 0) {
5043 printf("fwohci_ir_read overrun %d\n",
5044 irc->irc_pktcount);
5045 }
5046 }
5047 #endif /* USEDRAIN */
5048
5049 fd = irc->irc_readtop;
5050
5051 #if 0
5052 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0
5053 && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) {
5054 unsigned int s;
5055 int i = 0;
5056
5057 fdprev = fd;
5058 while (fd->fd_status != 0) {
5059 s = data[14] << 8;
5060 s |= data[15];
5061
5062 if (s != 0x0000ffffu) {
5063 DPRINTF(("find header %x at %d\n",
5064 s, irc->irc_pktcount));
5065 irc->irc_status |= IRC_STATUS_RECEIVE;
5066 break;
5067 }
5068
5069 fd->fd_rescount = 0;
5070 fd->fd_status = 0;
5071
5072 fdprev = fd;
5073 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5074 fd = irc->irc_desc_map;
5075 data = irc->irc_buf;
5076 }
5077 ++i;
5078 }
5079
5080 /* XXX SYNC */
5081 if (i > 0) {
5082 tmpbranch = fdprev->fd_branch;
5083 fdprev->fd_branch = 0;
5084 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5085 irc->irc_writeend = fdprev;
5086 irc->irc_savedbranch = tmpbranch;
5087 }
5088 /* XXX SYNC */
5089
5090 if (fd->fd_status == 0) {
5091 return EAGAIN;
5092 }
5093 }
5094 #endif
5095
5096 hdrshim = 8;
5097 fwisohdrsiz = 0;
5098 data = irc->irc_buf + (fd - irc->irc_desc_map) * irc->irc_maxsize;
5099 if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) {
5100 fwisohdrsiz = sizeof(struct fwiso_header);
5101 }
5102
5103 while (fd->fd_status != 0 &&
5104 (copylen = fd->fd_reqcount - fd->fd_rescount - hdrshim - headoffs)
5105 + fwisohdrsiz < uio->uio_resid) {
5106
5107 DPRINTF(("pkt %04x:%04x uiomove %p, %d\n",
5108 fd->fd_status, fd->fd_rescount,
5109 (void *)(data + 8 + headoffs), copylen));
5110 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0) {
5111 DPRINTF(("[%d]", copylen));
5112 if (irc->irc_pktcount > 1000) {
5113 printf("no header found\n");
5114 status = EIO;
5115 break; /* XXX */
5116 }
5117 } else {
5118 DPRINTF(("<%d>", copylen));
5119 }
5120
5121 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0
5122 && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC
5123 && copylen > 0) {
5124 unsigned int s;
5125
5126 s = data[14] << 8;
5127 s |= data[15];
5128
5129 if (s != 0x0000ffffu) {
5130 DPRINTF(("find header %x at %d\n",
5131 s, irc->irc_pktcount));
5132 irc->irc_status |= IRC_STATUS_RECEIVE;
5133 }
5134 }
5135
5136 if (irc->irc_status & IRC_STATUS_RECEIVE) {
5137 if (copylen > 0) {
5138 if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) {
5139 struct fwiso_header fh;
5140
5141 fh.fh_timestamp = htonl((*(u_int32_t *)data) & 0xffff);
5142 fh.fh_speed = htonl((fd->fd_status >> 5)& 0x00000007);
5143 fh.fh_capture_size = htonl(copylen + 4);
5144 fh.fh_iso_header = htonl(*(u_int32_t *)(data + 4));
5145 status = uiomove((void *)&fh,
5146 sizeof(fh), uio);
5147 if (status != 0) {
						/* An error occurred. */
5149 printf("uio error in hdr\n");
5150 break;
5151 }
5152 }
5153 status = uiomove((void *)(data + 8 + headoffs),
5154 copylen, uio);
5155 if (status != 0) {
					/* An error occurred. */
5157 printf("uio error\n");
5158 break;
5159 }
5160 #ifdef FW_DEBUG
5161 totalread += copylen;
5162 #endif
5163 }
5164 }
5165
5166 fd->fd_rescount = 0;
5167 fd->fd_status = 0;
5168
5169 #if 0
5170 /* advance writeend pointer and fill branch */
5171
5172 tmpbranch = fd->fd_branch;
5173 fd->fd_branch = 0;
5174 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5175 irc->irc_writeend = fd;
5176 irc->irc_savedbranch = tmpbranch;
5177 #endif
5178 fdprev = fd;
5179
5180 data += irc->irc_maxsize;
5181 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5182 fd = irc->irc_desc_map;
5183 data = irc->irc_buf;
5184 }
5185 ++irc->irc_pktcount;
5186 }
5187
5188 #if 1
5189 if (irc->irc_pktcount != pktcount_prev) {
5190 /* XXX SYNC */
5191 tmpbranch = fdprev->fd_branch;
5192 fdprev->fd_branch = 0;
5193 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5194 irc->irc_writeend = fdprev;
5195 irc->irc_savedbranch = tmpbranch;
5196 /* XXX SYNC */
5197 }
5198 #endif
5199
5200 if (!(OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num,
5201 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)) {
5202 /* do wake */
5203 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5204 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
5205 }
5206
5207 if (packetnum > irc->irc_maxqueuelen) {
5208 irc->irc_maxqueuelen = packetnum;
5209 irc->irc_maxqueuepos = irc->irc_pktcount;
5210 }
5211
5212 if (irc->irc_pktcount == pktcount_prev) {
5213 #if 0
5214 printf("fwohci_ir_read: process 0 packet, total %d\n",
5215 irc->irc_pktcount);
5216 if (++pktfail > 30) {
5217 return 0;
5218 }
5219 #endif
5220 return EAGAIN;
5221 }
5222
5223 irc->irc_readtop = fd;
5224
	DPRINTF(("fwohci_ir_read: read %d bytes, %d packets total\n",
5226 totalread, irc->irc_pktcount));
5227
5228 return status;
5229 }
5230
5231
5232
5233
5234 /*
5235 * int fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag,
5236 * void *wchan, char *name)
5237 *
 * This function waits until new data arrives.
5239 */
5240 int
5241 fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag, void *wchan, char *name)
5242 {
5243 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5244 struct fwohci_desc *fd;
5245 int pktnum;
5246 int stat;
5247
5248 if ((pktnum = fwohci_ir_ctx_packetnum(irc)) > 4) {
5249 DPRINTF(("fwohci_ir_wait enough data %d\n", pktnum));
5250 return 0;
5251 }
5252
5253 fd = irc->irc_readtop + 32;
5254 if (fd >= irc->irc_desc_map + irc->irc_desc_num) {
5255 fd -= irc->irc_desc_num;
5256 }
5257
5258 irc->irc_waitchan = wchan;
5259 if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) {
5260 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
5261 DPRINTF(("fwohci_ir_wait stops %d set intr %d\n",
5262 irc->irc_readtop - irc->irc_desc_map,
5263 fd - irc->irc_desc_map));
5264 /* XXX SYNC */
5265 }
5266
5267 #ifdef FWOHCI_WAIT_DEBUG
5268 irc->irc_cycle[0] = fwohci_cycletimer(irc->irc_sc);
5269 #endif
5270
5271 irc->irc_status |= IRC_STATUS_SLEEPING;
5272 if ((stat = tsleep(wchan, PCATCH|PRIBIO, name, hz*10)) != 0) {
5273 irc->irc_waitchan = NULL;
5274 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
5275 if (stat == EWOULDBLOCK) {
5276 printf("fwohci_ir_wait: timeout\n");
5277 return EIO;
5278 } else {
5279 return EINTR;
5280 }
5281 }
5282
5283 irc->irc_waitchan = NULL;
5284 if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) {
5285 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
5286 /* XXX SYNC */
5287 }
5288
5289 DPRINTF(("fwohci_ir_wait: wakeup\n"));
5290
5291 return 0;
5292 }
5293
5294
5295
5296
5297 /*
5298 * int fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag,
5299 * struct proc *p)
5300 *
 * This function returns the number of packets in the queue.
5302 */
5303 int
5304 fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag, struct proc *p)
5305 {
5306 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5307 int pktnum;
5308
5309 if (irc->irc_status & IRC_STATUS_READY) {
5310 printf("fwohci_ir_select: starting iso read engine\n");
5311 fwohci_ir_start(irc);
5312 }
5313
5314 if ((pktnum = fwohci_ir_ctx_packetnum(irc)) == 0) {
5315 selrecord(p, &irc->irc_sel);
5316 }
5317
5318 return pktnum;
5319 }
5320
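/*
 * A minimal sketch of how an upper layer might drive the isochronous
 * receive entry points above from its read routine: copy out whatever is
 * queued with fwohci_ir_read() and sleep via fwohci_ir_wait() when the
 * context is empty.  Illustrative only and not compiled;
 * "example_ir_read_loop", the zero header offset and the wait-channel
 * choice are assumptions of the sketch.
 */
#if 0
static int
example_ir_read_loop(struct device *dev, ieee1394_ir_tag_t tag,
    struct uio *uio)
{
	int error;

	for (;;) {
		error = fwohci_ir_read(dev, tag, uio, 0, 0);
		if (error != EAGAIN)
			return error;
		/* Nothing buffered yet; sleep until packets arrive. */
		error = fwohci_ir_wait(dev, tag, uio, "exirrd");
		if (error)
			return error;
	}
}
#endif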
5321
5322
5323 #ifdef USEDRAIN
5324 /*
5325 * int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc)
5326 *
 * This function drains all the packets in the receive DMA
 * buffer.
5329 */
5330 static int
5331 fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc)
5332 {
5333 struct fwohci_desc *fd = irc->irc_readtop;
5334 u_int32_t reg;
5335 int count = 0;
5336
5337 reg = OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num,
5338 OHCI_SUBREG_ContextControlClear);
5339
5340 printf("fwohci_ir_ctx_drain ctx%s%s%s%s\n",
5341 reg & OHCI_CTXCTL_RUN ? " run" : "",
5342 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5343 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5344 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5345
5346 if ((reg & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUN) {
5347 /* DMA engine is stopped */
5348 u_int32_t startadr;
5349
5350 for (fd = irc->irc_desc_map;
5351 fd < irc->irc_desc_map + irc->irc_desc_num;
5352 ++fd) {
5353 fd->fd_status = 0;
5354 }
5355
5356 /* Restore branch addr of the last descriptor */
5357 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5358
5359 irc->irc_readtop = irc->irc_desc_map;
5360 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1;
5361 irc->irc_savedbranch = irc->irc_writeend->fd_branch;
5362 irc->irc_writeend->fd_branch = 0;
5363
5364 count = irc->irc_desc_num;
5365
5366 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5367 OHCI_SUBREG_ContextControlClear,
5368 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD);
5369
5370 startadr = (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr;
5371
5372 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count);
5373
5374 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5375 OHCI_SUBREG_CommandPtr, startadr | 1);
5376
5377 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5378 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
5379 } else {
5380 const int removecount = irc->irc_desc_num/2;
5381 u_int32_t tmpbranch;
5382
5383 for (count = 0; count < removecount; ++count) {
5384 if (fd->fd_status == 0) {
5385 break;
5386 }
5387
5388 fd->fd_status = 0;
5389
5390 tmpbranch = fd->fd_branch;
5391 fd->fd_branch = 0;
5392 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5393 irc->irc_writeend = fd;
5394 irc->irc_savedbranch = tmpbranch;
5395
5396 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5397 fd = irc->irc_desc_map;
5398 }
5399 ++count;
5400 }
5401
5402 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count);
5403 }
5404
5405 return count;
5406 }
5407 #endif /* USEDRAIN */
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417 /*
5418 * service routines for isochronous transmit
5419 */
5420
5421
5422 struct fwohci_it_ctx *
5423 fwohci_it_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tag, int maxsize)
5424 {
5425 struct fwohci_it_ctx *itc;
5426 size_t dmastrsize;
5427 struct fwohci_it_dmabuf *dmastr;
5428 struct fwohci_desc *desc;
5429 bus_addr_t descphys;
5430 int nodesc;
5431 int i, j;
5432
5433 if ((itc = malloc(sizeof(*itc), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) {
5434 return itc;
5435 }
5436
5437 itc->itc_num = no;
5438 itc->itc_flags = 0;
5439 itc->itc_sc = sc;
5440 itc->itc_bufnum = FWOHCI_IT_BUFNUM;
5441
5442 itc->itc_channel = ch;
5443 itc->itc_tag = tag;
5444 itc->itc_speed = OHCI_CTXCTL_SPD_100; /* XXX */
5445
5446 itc->itc_outpkt = 0;
5447
5448 itc->itc_maxsize = maxsize;
5449
5450 dmastrsize = sizeof(struct fwohci_it_dmabuf)*itc->itc_bufnum;
5451
5452 if ((dmastr = malloc(dmastrsize, M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) {
5453 goto error_1;
5454 }
5455 itc->itc_buf = dmastr;
5456
5457 /*
	 * Get memory for descriptors.  Each buffer will have 256 packet
	 * entries and one trailing descriptor for writing to the scratch
	 * area; four bytes are reserved for the scratch itself.
5461 */
5462 itc->itc_descsize = (256*3 + 1)*itc->itc_bufnum;
5463
5464 if (fwohci_it_desc_alloc(itc)) {
5465 printf("%s: cannot get enough memory for descriptor\n",
5466 sc->sc_sc1394.sc1394_dev.dv_xname);
5467 goto error_2;
5468 }
5469
5470 /* prepare DMA buffer */
5471 nodesc = itc->itc_descsize/itc->itc_bufnum;
5472 desc = (struct fwohci_desc *)itc->itc_descmap;
5473 descphys = itc->itc_dseg.ds_addr;
5474
5475 for (i = 0; i < itc->itc_bufnum; ++i) {
5476
5477 if (fwohci_itd_construct(itc, &dmastr[i], i, desc,
5478 descphys, nodesc,
5479 itc->itc_maxsize, itc->itc_scratch_paddr)) {
5480 goto error_3;
5481 }
5482 desc += nodesc;
5483 descphys += sizeof(struct fwohci_desc)*nodesc;
5484 }
5485
5486 #if 1
5487 itc->itc_buf_start = itc->itc_buf;
5488 itc->itc_buf_end = itc->itc_buf;
5489 itc->itc_buf_linkend = itc->itc_buf;
5490 #else
5491 itc->itc_bufidx_start = 0;
5492 itc->itc_bufidx_end = 0;
5493 itc->itc_bufidx_linkend = 0;
5494 #endif
5495 itc->itc_buf_cnt = 0;
5496 itc->itc_waitchan = NULL;
5497 *itc->itc_scratch = 0xffffffff;
5498
5499 return itc;
5500
5501 error_3:
5502 for (j = 0; j < i; ++j) {
5503 fwohci_itd_destruct(&dmastr[j]);
5504 }
5505 fwohci_it_desc_free(itc);
5506 error_2:
5507 free(itc->itc_buf, M_DEVBUF);
5508 error_1:
5509 free(itc, M_DEVBUF);
5510
5511 return NULL;
5512 }
5513
5514
5515
5516 void
5517 fwohci_it_ctx_destruct(struct fwohci_it_ctx *itc)
5518 {
5519 int i;
5520
5521 for (i = 0; i < itc->itc_bufnum; ++i) {
5522 fwohci_itd_destruct(&itc->itc_buf[i]);
5523 }
5524
5525 fwohci_it_desc_free(itc);
5526 free(itc, M_DEVBUF);
5527 }
5528
5529
5530 /*
5531 * static int fwohci_it_desc_alloc(struct fwohci_it_ctx *itc)
5532 *
5533 * Allocates descriptors for context DMA dedicated for
5534 * isochronous transmit.
5535 *
 * This function returns 0 (zero) if it succeeds.  Otherwise it
 * returns a negative value.
5538 */
5539 static int
5540 fwohci_it_desc_alloc(struct fwohci_it_ctx *itc)
5541 {
5542 bus_dma_tag_t dmat = itc->itc_sc->sc_dmat;
5543 const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
5544 int error, dsize;
5545
5546 /* add for scratch */
5547 itc->itc_descsize++;
5548
5549 /* rounding up to 256 */
5550 if ((itc->itc_descsize & 0x0ff) != 0) {
5551 itc->itc_descsize =
5552 (itc->itc_descsize & ~0x0ff) + 0x100;
5553 }
5554 /* remove for scratch */
5555
5556 itc->itc_descsize--;
5557 printf("%s: fwohci_it_desc_alloc will allocate %d descs\n",
5558 xname, itc->itc_descsize);
5559
5560 /*
5561 * allocate descriptor buffer
5562 */
5563 dsize = sizeof(struct fwohci_desc) * itc->itc_descsize;
5564
5565 printf("%s: fwohci_it_desc_alloc: descriptor %d, dsize %d\n",
5566 xname, itc->itc_descsize, dsize);
5567
5568 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0,
5569 &itc->itc_dseg, 1, &itc->itc_dnsegs, 0)) != 0) {
5570 printf("%s: unable to allocate descriptor buffer, error = %d\n",
5571 xname, error);
5572 goto fail_0;
5573 }
5574
5575 printf("fwohci_it_desc_alloc: %d segment[s]\n", itc->itc_dnsegs);
5576
5577 if ((error = bus_dmamem_map(dmat, &itc->itc_dseg,
5578 itc->itc_dnsegs, dsize, (caddr_t *)&itc->itc_descmap,
5579 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
5580 printf("%s: unable to map descriptor buffer, error = %d\n",
5581 xname, error);
5582 goto fail_1;
5583 }
5584
5585 printf("fwohci_it_desc_alloc: bus_dmamem_map success dseg %lx:%lx\n",
5586 (long)itc->itc_dseg.ds_addr, (long)itc->itc_dseg.ds_len);
5587
5588 if ((error = bus_dmamap_create(dmat, dsize, itc->itc_dnsegs,
5589 dsize, 0, BUS_DMA_WAITOK, &itc->itc_ddmamap)) != 0) {
5590 printf("%s: unable to create descriptor buffer DMA map, "
5591 "error = %d\n", xname, error);
5592 goto fail_2;
5593 }
5594
	printf("fwohci_it_desc_alloc: bus_dmamap_create success\n");
5596
5597 {
5598 int loop;
5599
5600 for (loop = 0; loop < itc->itc_ddmamap->dm_nsegs; ++loop) {
5601 printf("\t%.2d: 0x%lx - 0x%lx\n", loop,
5602 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr,
5603 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr +
5604 (long)itc->itc_ddmamap->dm_segs[loop].ds_len - 1);
5605 }
5606 }
5607
5608 if ((error = bus_dmamap_load(dmat, itc->itc_ddmamap,
5609 itc->itc_descmap, dsize, NULL, BUS_DMA_WAITOK)) != 0) {
5610 printf("%s: unable to load descriptor buffer DMA map, "
5611 "error = %d\n", xname, error);
5612 goto fail_3;
5613 }
5614
5615 printf("%s: fwohci_it_desc_alloc: get DMA memory phys:0x%08x vm:%p\n",
5616 xname, (int)itc->itc_ddmamap->dm_segs[0].ds_addr, itc->itc_descmap);
5617
5618 itc->itc_scratch = (u_int32_t *)(itc->itc_descmap
5619 + (sizeof(struct fwohci_desc))*itc->itc_descsize);
5620 itc->itc_scratch_paddr =
5621 itc->itc_ddmamap->dm_segs[0].ds_addr
5622 + (sizeof(struct fwohci_desc))*itc->itc_descsize;
5623
5624 printf("%s: scratch %p, 0x%x\n", xname, itc->itc_scratch,
5625 (int)itc->itc_scratch_paddr);
5626
5627 /* itc->itc_scratch_paddr = vtophys(itc->itc_scratch); */
5628
5629 return 0;
5630
5631 fail_3:
5632 bus_dmamap_destroy(dmat, itc->itc_ddmamap);
5633 fail_2:
5634 bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize);
5635 fail_1:
5636 bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs);
5637 fail_0:
5638 itc->itc_dnsegs = 0;
5639 itc->itc_descmap = NULL;
5640 return error;
5641 }
5642
5643
5644 static void
5645 fwohci_it_desc_free(struct fwohci_it_ctx *itc)
5646 {
5647 bus_dma_tag_t dmat = itc->itc_sc->sc_dmat;
5648 int dsize = sizeof(struct fwohci_desc) * itc->itc_descsize + 4;
5649
5650 bus_dmamap_destroy(dmat, itc->itc_ddmamap);
5651 bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize);
5652 bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs);
5653
5654 itc->itc_dnsegs = 0;
5655 itc->itc_descmap = NULL;
5656 }
5657
5658
5659
5660 /*
5661 * int fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
5662 * struct ieee1394_it_datalist *itdata, int flags)
5663 *
5664  *	This function writes packet data into the DMA buffers of the
5665  *	context.  It parses the ieee1394_it_datalist commands, fills
5666  *	the DMA buffers, and returns the number of packets written,
5667  *	or a negative error code on failure.
5668  *
5669  *	When this function returns a positive value smaller than
5670  *	ndata, it has reached the end of the DMA buffers.
5672 */
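/*
 * Hypothetical caller sketch (not part of this driver).  It assumes the
 * ieee1394_it_datalist layout used by fwohci_itd_writedata() below: four
 * it_cmd slots per element, with the copy size carried in the
 * IEEE1394_IT_CMD_SIZE bits of a PTR command.  `cip_header', `payload',
 * `it' and `n' are the caller's own variables.
 *
 *	struct ieee1394_it_datalist d;
 *
 *	d.it_cmd[0] = IEEE1394_IT_CMD_IMMED;
 *	memcpy(&d.it_u[0].id_data, cip_header, 8);
 *	d.it_cmd[1] = IEEE1394_IT_CMD_PTR | (480 & IEEE1394_IT_CMD_SIZE);
 *	d.it_u[1].id_addr = payload;
 *	d.it_cmd[2] = IEEE1394_IT_CMD_NOP;
 *	d.it_cmd[3] = IEEE1394_IT_CMD_NOP;
 *
 *	n = fwohci_it_ctx_writedata(it, 1, &d, 0);
 */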
5673 int
5674 fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
5675 struct ieee1394_it_datalist *itdata, int flags)
5676 {
5677 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
5678 int rv;
5679 int writepkt = 0;
5680 struct fwohci_it_dmabuf *itd;
5681 int i = 0;
5682
5683 itd = itc->itc_buf_end;
5684
5685 while (ndata > 0) {
5686 int s;
5687
5688 if (fwohci_itd_isfull(itd) || fwohci_itd_islocked(itd)) {
5689 if (itc->itc_buf_cnt == itc->itc_bufnum) {
5690 /* no space to write */
5691 printf("sleeping: start linkend end %d %d %d "
5692 "bufcnt %d\n",
5693 itc->itc_buf_start->itd_num,
5694 itc->itc_buf_linkend->itd_num,
5695 itc->itc_buf_end->itd_num,
5696 itc->itc_buf_cnt);
5697
5698 itc->itc_waitchan = itc;
5699 				if (tsleep((void *)itc->itc_waitchan,
5700 				    PCATCH, "fwohci it", 0) != 0) {
5701 itc->itc_waitchan = NULL;
5702 printf("fwohci0 signal\n");
5703 break;
5704 }
5705 printf("waking: start linkend end %d %d %d\n",
5706 itc->itc_buf_start->itd_num,
5707 itc->itc_buf_linkend->itd_num,
5708 itc->itc_buf_end->itd_num);
5709
5710 itc->itc_waitchan = itc;
5711 i = 0;
5712 } else {
5713 /*
5714 * Use next buffer. This DMA buffer is full
5715 * or locked.
5716 */
5717 INC_BUF(itc, itd);
5718 }
5719 }
5720
5721 if (++i > 10) {
5722 panic("why loop so much %d", itc->itc_buf_cnt);
5723 break;
5724 }
5725
5726 s = splbio();
5727
5728 if (fwohci_itd_hasdata(itd) == 0) {
5729 ++itc->itc_buf_cnt;
5730 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
5731 }
5732
5733 rv = fwohci_itd_writedata(itd, ndata, itdata);
5734 DPRINTF(("fwohci_it_ctx_writedata: buf %d ndata %d rv %d\n",
5735 itd->itd_num, ndata, rv));
5736
5737 if (itc->itc_buf_start == itc->itc_buf_linkend
5738 && (itc->itc_flags & ITC_FLAGS_RUN) != 0) {
5739
5740 #ifdef DEBUG_USERADD
5741 printf("fwohci_it_ctx_writedata: emergency!\n");
5742 #endif
5743 if (itc->itc_buf_linkend != itc->itc_buf_end
5744 && fwohci_itd_hasdata(itc->itc_buf_end)) {
5745 struct fwohci_it_dmabuf *itdn = itc->itc_buf_linkend;
5746
5747 INC_BUF(itc, itdn);
5748 printf("connecting %d after %d\n",
5749 itdn->itd_num,
5750 itc->itc_buf_linkend->itd_num);
5751 if (fwohci_itd_link(itc->itc_buf_linkend, itdn)) {
5752 printf("fwohci_it_ctx_writedata:"
5753 " cannot link correctly\n");
5754 return -1;
5755 }
5756 itc->itc_buf_linkend = itdn;
5757 }
5758 }
5759
5760 splx(s);
5761
5762 if (rv < 0) {
5763 			/* some error happened */
5764 break;
5765 }
5766
5767 writepkt += rv;
5768 ndata -= rv;
5769 itdata += rv;
5770 itc->itc_buf_end = itd;
5771 }
5772
5773 /* Start DMA engine if stopped */
5774 if ((itc->itc_flags & ITC_FLAGS_RUN) == 0) {
5775 if (itc->itc_buf_cnt > itc->itc_bufnum - 1 || flags) {
5776 /* run */
5777 			printf("fwohci_it_ctx_writedata: DMA engine start\n");
5778 fwohci_it_ctx_run(itc);
5779 }
5780 }
5781
5782 return writepkt;
5783 }
5784
5785
5786
5787 static void
5788 fwohci_it_ctx_run(struct fwohci_it_ctx *itc)
5789 {
5790 struct fwohci_softc *sc = itc->itc_sc;
5791 int ctx = itc->itc_num;
5792 struct fwohci_it_dmabuf *itd
5793 = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
5794 u_int32_t reg;
5795 int i;
5796
5797 if (itc->itc_flags & ITC_FLAGS_RUN) {
5798 return;
5799 }
5800 itc->itc_flags |= ITC_FLAGS_RUN;
5801
5802 /*
5803 	 * Dirty, but there is no better place to save the branch address
5804 	 * of the top DMA buffer and replace it with 0.
5805 */
5806 itd->itd_savedbranch = itd->itd_lastdesc->fd_branch;
5807 itd->itd_lastdesc->fd_branch = 0;
5808
5809 if (itc->itc_buf_cnt > 1) {
5810 struct fwohci_it_dmabuf *itdn = itd;
5811
5812 #if 0
5813 INC_BUF(itc, itdn);
5814
5815 if (fwohci_itd_link(itd, itdn)) {
5816 printf("fwohci_it_ctx_run: cannot link correctly\n");
5817 return;
5818 }
5819 itc->itc_buf_linkend = itdn;
5820 #else
5821 for (;;) {
5822 INC_BUF(itc, itdn);
5823
5824 if (itdn == itc->itc_buf_end) {
5825 break;
5826 }
5827 if (fwohci_itd_link(itd, itdn)) {
5828 printf("fwohci_it_ctx_run: cannot link\n");
5829 return;
5830 }
5831 itd = itdn;
5832 }
5833 itc->itc_buf_linkend = itd;
5834 #endif
5835 } else {
5836 itd->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
5837 itc->itc_buf_linkend = itc->itc_buf_end;
5838 itc->itc_buf_end->itd_flags |= ITD_FLAGS_LOCK;
5839
5840 /* sanity check */
5841 if (itc->itc_buf_end != itc->itc_buf_start) {
5842 printf("buf start & end differs %p %p\n",
5843 itc->itc_buf_end, itc->itc_buf_start);
5844 }
5845 #if 0
5846 {
5847 u_int32_t *fdp;
5848 u_int32_t adr;
5849 int i;
5850
5851 printf("fwohci_it_ctx_run: itc_buf_cnt 1, DMA buf %d\n",
5852 itd->itd_num);
5853 printf(" last desc %p npacket %d, %d 0x%04x%04x",
5854 itd->itd_lastdesc, itd->itd_npacket,
5855 (itd->itd_lastdesc - itd->itd_desc)/3,
5856 itd->itd_lastdesc->fd_flags,
5857 itd->itd_lastdesc->fd_reqcount);
5858 fdp = (u_int32_t *)itd->itd_desc;
5859 adr = (u_int32_t)itd->itd_desc_phys; /* XXX */
5860
5861 for (i = 0; i < 7*4; ++i) {
5862 if (i % 4 == 0) {
5863 printf("\n%x:", adr + 4*i);
5864 }
5865 printf(" %08x", fdp[i]);
5866 }
5867
5868 if (itd->itd_npacket > 4) {
5869 printf("\n...");
5870 i = (itd->itd_npacket - 2)*12 + 4;
5871 } else {
5872 i = 2*12 + 4;
5873 }
5874 for (;i < itd->itd_npacket*12 + 4; ++i) {
5875 if (i % 4 == 0) {
5876 printf("\n%x:", adr + 4*i);
5877 }
5878 printf(" %08x", fdp[i]);
5879 }
5880 printf("\n");
5881 }
5882 #endif
5883 }
5884 {
5885 struct fwohci_desc *fd;
5886
5887 printf("fwohci_it_ctx_run: link start linkend end %d %d %d\n",
5888 itc->itc_buf_start->itd_num,
5889 itc->itc_buf_linkend->itd_num,
5890 itc->itc_buf_end->itd_num);
5891
5892 fd = itc->itc_buf_start->itd_desc;
5893 if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
5894 printf("fwohci_it_ctx_run: start buf not with STORE\n");
5895 }
5896 fd += 3;
5897 if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
5898 printf("fwohci_it_ctx_run: start buf does not have intr\n");
5899 }
5900
5901 fd = itc->itc_buf_linkend->itd_desc;
5902 if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
5903 printf("fwohci_it_ctx_run: linkend buf not with STORE\n");
5904 }
5905 fd += 3;
5906 if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
5907 printf("fwohci_it_ctx_run: linkend buf does not have intr\n");
5908 }
5909 }
5910
5911 *itc->itc_scratch = 0xffffffff;
5912
5913 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear,
5914 0xffff0000);
5915 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
5916
5917 printf("fwohci_it_ctx_run start for ctx %d\n", ctx);
5918 printf("%s: bfr IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
5919 sc->sc_sc1394.sc1394_dev.dv_xname,
5920 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
5921 reg,
5922 reg & OHCI_CTXCTL_RUN ? " run" : "",
5923 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5924 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5925 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5926
5927 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear,
5928 OHCI_CTXCTL_RUN);
5929
5930 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
5931 i = 0;
5932 while (reg & (OHCI_CTXCTL_ACTIVE | OHCI_CTXCTL_RUN)) {
5933 delay(100);
5934 if (++i > 1000) {
5935 printf("%s: cannot stop iso transmit engine\n",
5936 sc->sc_sc1394.sc1394_dev.dv_xname);
5937 break;
5938 }
5939 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx,
5940 OHCI_SUBREG_ContextControlSet);
5941 }
5942
5943 printf("%s: itm IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
5944 sc->sc_sc1394.sc1394_dev.dv_xname,
5945 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
5946 reg,
5947 reg & OHCI_CTXCTL_RUN ? " run" : "",
5948 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5949 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5950 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5951
5952 printf("%s: writing CommandPtr to 0x%08x\n",
5953 sc->sc_sc1394.sc1394_dev.dv_xname,
5954 (int)itc->itc_buf_start->itd_desc_phys);
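	/*
	 * The low four bits of CommandPtr carry the Z value, i.e. the
	 * number of 16-byte descriptors in the first descriptor block;
	 * the leading block built by fwohci_itd_construct() has four
	 * (STORE_VALUE, the OUTPUT_IMMEDIATE pair and OUTPUT_LAST).
	 */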
5955 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_CommandPtr,
5956 fwohci_itd_list_head(itc->itc_buf_start) | 4);
5957
5958 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlSet,
5959 OHCI_CTXCTL_RUN | OHCI_CTXCTL_WAKE);
5960
5961 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
5962
5963 printf("%s: aft IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
5964 sc->sc_sc1394.sc1394_dev.dv_xname,
5965 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
5966 reg,
5967 reg & OHCI_CTXCTL_RUN ? " run" : "",
5968 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5969 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5970 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5971 }
5972
5973
5974
5975 int
5976 fwohci_it_ctx_flush(ieee1394_it_tag_t it)
5977 {
5978 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
5979 int rv = 0;
5980
5981 if ((itc->itc_flags & ITC_FLAGS_RUN) == 0
5982 && itc->itc_buf_cnt > 0) {
5983 printf("fwohci_it_ctx_flush: %s flushing\n",
5984 itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname);
5985
5986 fwohci_it_ctx_run(itc);
5987 rv = 1;
5988 }
5989
5990 return rv;
5991 }
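/*
 * Hypothetical producer sketch (not part of this driver): a writer such
 * as fwiso_write() is expected to feed packets with
 * fwohci_it_ctx_writedata() and push out a final partial buffer with
 * fwohci_it_ctx_flush(), roughly as follows:
 *
 *	while (ndata > 0) {
 *		n = fwohci_it_ctx_writedata(it, ndata, itdata, 0);
 *		if (n <= 0)
 *			break;
 *		ndata -= n;
 *		itdata += n;
 *	}
 *	(void)fwohci_it_ctx_flush(it);
 */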
5992
5993
5994 /*
5995 * static void fwohci_it_intr(struct fwohci_softc *sc,
5996  *	struct fwohci_it_ctx *itc)
5997  *
5998  *	This function is the interrupt handler for the isochronous
5999  *	transmit interrupt.  It will 1) unlink used (already
6000  *	transmitted) buffers, 2) link newly filled buffers if
6001  *	necessary, and 3) notify fwiso_write() that free DMA buffers
6002  *	are available.
6003 */
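/*
 * How the handler locates the hardware's position: the STORE_VALUE
 * descriptor at the head of each DMA buffer writes that buffer's number
 * into the shared scratch word (see fwohci_itd_construct()), which is
 * read back below as `scratchval'.  The word is preloaded with
 * 0xffffffff, so reading back 0xffff means no buffer has been started
 * since the last interrupt.
 */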
6004 static void
6005 fwohci_it_intr(struct fwohci_softc *sc, struct fwohci_it_ctx *itc)
6006 {
6007 struct fwohci_it_dmabuf *itd, *newstartbuf;
6008 u_int16_t scratchval;
6009 u_int32_t reg;
6010
6011 reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
6012 OHCI_SUBREG_ContextControlSet);
6013
6014 /* print out debug info */
6015 #ifdef FW_DEBUG
6016 printf("fwohci_it_intr: CTX %d\n", itc->itc_num);
6017
6018 printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x "
6019 "ContextCtrl 0x%08x%s%s%s%s\n",
6020 sc->sc_sc1394.sc1394_dev.dv_xname,
6021 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr),
6022 reg,
6023 reg & OHCI_CTXCTL_RUN ? " run" : "",
6024 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
6025 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
6026 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
6027 printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n",
6028 sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch,
6029 itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num,
6030 itc->itc_buf_cnt);
6031 {
6032 u_int32_t reg
6033 = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
6034 printf("\t\tIsoCounter 0x%08x, %d %d %d\n", reg,
6035 (reg >> 25) & 0xfe, (reg >> 12) & 0x1fff, reg & 0xfff);
6036 }
6037 #endif /* FW_DEBUG */
6038 /* end print out debug info */
6039
6040 scratchval = (*itc->itc_scratch) & 0x0000ffff;
6041 *itc->itc_scratch = 0xffffffff;
6042
6043 if ((reg & OHCI_CTXCTL_ACTIVE) == 0 && scratchval != 0xffff) {
6044 /* DMA engine has been stopped */
6045 printf("DMA engine stopped\n");
6046 printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x "
6047 "ContextCtrl 0x%08x%s%s%s%s\n",
6048 sc->sc_sc1394.sc1394_dev.dv_xname,
6049 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr),
6050 reg,
6051 reg & OHCI_CTXCTL_RUN ? " run" : "",
6052 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
6053 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
6054 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
6055 printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n",
6056 sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch,
6057 itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num,
6058 itc->itc_buf_cnt);
6059 {
6060 u_int32_t reg
6061 = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
6062 printf("\t\tIsoCounter 0x%08x, %d %d %d\n", reg,
6063 (reg >> 25) & 0xfe, (reg >> 12) & 0x1fff, reg & 0xfff);
6064 }
6065 printf("\t\tbranch of lastdesc 0x%08x\n",
6066 itc->itc_buf_start->itd_lastdesc->fd_branch);
6067
6068 scratchval = 0xffff;
6069 itc->itc_flags &= ~ITC_FLAGS_RUN;
6070 }
6071
6072 /* unlink old buffers */
6073 if (scratchval != 0xffff) {
6074 /* normal path */
6075 newstartbuf = &itc->itc_buf[scratchval];
6076 } else {
6077 /* DMA engine stopped */
6078 newstartbuf = itc->itc_buf_linkend;
6079 INC_BUF(itc, newstartbuf);
6080 }
6081
6082 itd = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
6083 itc->itc_buf_start = newstartbuf;
6084 while (itd != newstartbuf) {
6085 itc->itc_outpkt += itd->itd_npacket;
6086 fwohci_itd_unlink(itd);
6087 INC_BUF(itc, itd);
6088 --itc->itc_buf_cnt;
6089 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
6090 }
6091
6092 #ifdef DEBUG_USERADD
6093 if (scratchval != 0xffff) {
6094 printf("fwohci0: intr start %d dataend %d %d\n", scratchval,
6095 itc->itc_buf_end->itd_num, itc->itc_outpkt);
6096 }
6097 #endif
6098
6099 if (scratchval == 0xffff) {
6100 /* no data supplied */
6101 printf("fwohci_it_intr: no it data. output total %d\n",
6102 itc->itc_outpkt);
6103
6104 if (itc->itc_buf_cnt > 0) {
6105 printf("fwohci_it_intr: it DMA stops "
6106 "w/ valid databuf %d buf %d data %d"
6107 " intr reg 0x%08x\n",
6108 itc->itc_buf_cnt,
6109 itc->itc_buf_end->itd_num,
6110 fwohci_itd_hasdata(itc->itc_buf_end),
6111 OHCI_CSR_READ(sc, OHCI_REG_IntEventSet));
6112 } else {
6113 /* All the data gone */
6114 itc->itc_buf_start
6115 = itc->itc_buf_end
6116 = itc->itc_buf_linkend
6117 = &itc->itc_buf[0];
6118 printf("fwohci_it_intr: all packets gone\n");
6119 }
6120
6121 itc->itc_flags &= ~ITC_FLAGS_RUN;
6122
6123 OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
6124 OHCI_SUBREG_ContextControlClear, 0xffffffff);
6125 OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
6126 OHCI_SUBREG_CommandPtr, 0);
6127 OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
6128 OHCI_SUBREG_ContextControlClear, 0x1f);
6129
6130 /* send message */
6131 if (itc->itc_waitchan != NULL) {
6132 wakeup((void *)itc->itc_waitchan);
6133 }
6134
6135 return;
6136 }
6137
6138 #if 0
6139 /* unlink old buffers */
6140 newstartbuf = &itc->itc_buf[scratchval];
6141
6142 itd = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
6143 itc->itc_buf_start = newstartbuf;
6144 while (itd != newstartbuf) {
6145 itc->itc_outpkt += itd->itd_npacket;
6146 fwohci_itd_unlink(itd);
6147 INC_BUF(itc, itd);
6148 --itc->itc_buf_cnt;
6149 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
6150 }
6151 #endif
6152
6153 /* sanity check */
6154 {
6155 int startidx, endidx, linkendidx;
6156
6157 startidx = itc->itc_buf_start->itd_num;
6158 endidx = itc->itc_buf_end->itd_num;
6159 linkendidx = itc->itc_buf_linkend->itd_num;
6160
6161 if (startidx < endidx) {
6162 if (linkendidx < startidx
6163 || endidx < linkendidx) {
6164 printf("funny, linkend is not between start "
6165 "and end [%d, %d]: %d\n",
6166 startidx, endidx, linkendidx);
6167 }
6168 } else if (startidx > endidx) {
6169 if (linkendidx < startidx
6170 && endidx < linkendidx) {
6171 printf("funny, linkend is not between start "
6172 "and end [%d, %d]: %d\n",
6173 startidx, endidx, linkendidx);
6174 }
6175 } else {
6176 if (linkendidx != startidx) {
6177 printf("funny, linkend is not between start "
6178 "and end [%d, %d]: %d\n",
6179 startidx, endidx, linkendidx);
6180 }
6181
6182 }
6183 }
6184
6185 /* link if some valid DMA buffers exist */
6186 if (itc->itc_buf_cnt > 1
6187 && itc->itc_buf_linkend != itc->itc_buf_end) {
6188 struct fwohci_it_dmabuf *itdprev;
6189 int i;
6190
6191 DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
6192 itc->itc_num,
6193 itc->itc_buf_start->itd_num,
6194 itc->itc_buf_linkend->itd_num,
6195 itc->itc_buf_end->itd_num,
6196 itc->itc_buf_cnt));
6197
6198 itd = itdprev = itc->itc_buf_linkend;
6199 INC_BUF(itc, itd);
6200
6201 #if 0
6202 if (fwohci_itd_isfilled(itd) || itc->itc_buf_cnt == 2) {
6203 while (itdprev != itc->itc_buf_end) {
6204
6205 if (fwohci_itd_link(itdprev, itd)) {
6206 break;
6207 }
6208
6209 itdprev = itd;
6210 INC_BUF(itc, itd);
6211 }
6212 itc->itc_buf_linkend = itdprev;
6213 }
6214 #endif
6215 i = 0;
6216 while (itdprev != itc->itc_buf_end) {
6217 if (!fwohci_itd_isfilled(itd) && itc->itc_buf_cnt > 2) {
6218 break;
6219 }
6220
6221 if (fwohci_itd_link(itdprev, itd)) {
6222 break;
6223 }
6224
6225 itdprev = itd;
6226 INC_BUF(itc, itd);
6227
6228 itc->itc_buf_linkend = itdprev;
6229 ++i;
6230 }
6231
6232 if (i > 0) {
6233 DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
6234 itc->itc_num,
6235 itc->itc_buf_start->itd_num,
6236 itc->itc_buf_linkend->itd_num,
6237 itc->itc_buf_end->itd_num,
6238 itc->itc_buf_cnt));
6239 }
6240 } else {
6241 struct fwohci_it_dmabuf *le;
6242
6243 le = itc->itc_buf_linkend;
6244
6245 printf("CTX %d: start linkend dataend bufs %d, %d, %d, %d no buffer added\n",
6246 itc->itc_num,
6247 itc->itc_buf_start->itd_num,
6248 itc->itc_buf_linkend->itd_num,
6249 itc->itc_buf_end->itd_num,
6250 itc->itc_buf_cnt);
6251 printf("\tlast descriptor %s %04x %08x\n",
6252 le->itd_lastdesc->fd_flags & OHCI_DESC_INTR_ALWAYS ? "intr" : "",
6253 le->itd_lastdesc->fd_flags,
6254 le->itd_lastdesc->fd_branch);
6255 }
6256
6257 /* send message */
6258 if (itc->itc_waitchan != NULL) {
6259 /* */
6260 wakeup((void *)itc->itc_waitchan);
6261 }
6262 }
6263
6264
6265
6266 /*
6267 * int fwohci_itd_construct(struct fwohci_it_ctx *itc,
6268 * struct fwohci_it_dmabuf *itd, int num,
6269 * struct fwohci_desc *desc, bus_addr_t phys,
6270 * int descsize, int maxsize, paddr_t scratch)
6271  *
6272  *	Initialise DMA buffer `num': build its descriptor chain and
6273  *	allocate its data buffer.  Returns 0 on success, -1 on failure.
6274  */
6275 int
6276 fwohci_itd_construct(struct fwohci_it_ctx *itc, struct fwohci_it_dmabuf *itd,
6277 int num, struct fwohci_desc *desc, bus_addr_t phys, int descsize,
6278 int maxsize, paddr_t scratch)
6279 {
6280 const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6281 struct fwohci_desc *fd;
6282 struct fwohci_desc *descend;
6283 int npkt;
6284 int bufno = 0; /* DMA segment */
6285 bus_size_t bufused = 0; /* offset in a DMA segment */
6286 int roundsize;
6287 int tag = itc->itc_tag;
6288 int ch = itc->itc_channel;
6289
6290 itd->itd_ctx = itc;
6291 itd->itd_num = num;
6292
6293 if (descsize > 1024*3) {
6294 printf("%s: fwohci_itd_construct[%d] descsize %d too big\n",
6295 xname, num, descsize);
6296 return -1;
6297 }
6298
6299 itd->itd_desc = desc;
6300 itd->itd_descsize = descsize;
6301 itd->itd_desc_phys = phys;
6302
6303 itd->itd_lastdesc = desc;
6304 itd->itd_npacket = 0;
6305
6306 printf("%s: fwohci_itd_construct[%d] desc %p descsize %d, maxsize %d\n",
6307 xname, itd->itd_num, itd->itd_desc, itd->itd_descsize, maxsize);
6308
6309 if (descsize < 4) {
6310 		/* descriptor array too small; need at least 4 */
6311 return -1;
6312 }
6313
6314 	/* count how many packets this descriptor array can handle */
6315 itd->itd_maxpacket = (descsize - 1)/3;
6316
6317 	/* round maxsize up to a power of two; minimum 16 */
6318 	for (roundsize = 16; roundsize < maxsize; roundsize <<= 1);
6320 itd->itd_maxsize = roundsize;
6321
6322 printf("\t\tdesc%d [%x, %lx]\n", itd->itd_num,
6323 (u_int32_t)phys,
6324 (unsigned long)((u_int32_t)phys
6325 + (itd->itd_maxpacket*3 + 1)*sizeof(struct fwohci_desc)));
6326 printf("%s: fwohci_itd_construct[%d] npkt %d maxsize round up to %d\n",
6327 xname, itd->itd_num, itd->itd_maxpacket, itd->itd_maxsize);
6328
6329 /* obtain DMA buffer */
6330 if (fwohci_itd_dmabuf_alloc(itd)) {
6331 /* cannot allocate memory for DMA buffer */
6332 return -1;
6333 }
6334
6335 /*
6336 * make descriptor chain
6337 *
6338 	 * The first descriptor group has a STORE_VALUE, an
6339 	 * OUTPUT_IMMEDIATE and an OUTPUT_LAST descriptor.  Each
6340 	 * subsequent group has an OUTPUT_IMMEDIATE and an OUTPUT_LAST
6341 	 * descriptor.
6342 */
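	/*
	 * Resulting layout, one line per 16-byte descriptor slot:
	 *
	 *	desc[0]  STORE_VALUE       buffer number -> scratch word
	 *	desc[1]  OUTPUT_IMMEDIATE  iso packet header command
	 *	desc[2]    immediate header quadlets
	 *	desc[3]  OUTPUT_LAST       payload address, branch, intr
	 *	desc[4..6]  the same OUTPUT group for the next packet, etc.
	 */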
6343 descend = desc + descsize;
6344
6345 /* set store value descriptor for 1st descriptor group */
6346 desc->fd_flags = OHCI_DESC_STORE_VALUE;
6347 	desc->fd_reqcount = num;	/* value stored: this DMA buffer's number */
6348 desc->fd_data = scratch; /* at physical memory 'scratch' */
6349 desc->fd_branch = 0;
6350 desc->fd_status = desc->fd_rescount = 0;
6351
6352 itd->itd_store = desc;
6353 itd->itd_store_phys = phys;
6354
6355 ++desc;
6356 phys += 16;
6357
6358 npkt = 0;
6359 /* make OUTPUT_DESC chain for packets */
6360 for (fd = desc; fd + 2 < descend; fd += 3, ++npkt) {
6361 struct fwohci_desc *fi = fd;
6362 struct fwohci_desc *fl = fd + 2;
6363 u_int32_t *fi_data = (u_int32_t *)(fd + 1);
6364
6365 #if 0
6366 if (npkt > itd->itd_maxpacket - 3) {
6367 printf("%s: %3d fi fl %p %p\n", xname, npkt, fi,fl);
6368 }
6369 #endif
6370
6371 fi->fd_reqcount = 8; /* data size for OHCI command */
6372 fi->fd_flags = OHCI_DESC_IMMED;
6373 fi->fd_data = 0;
6374 fi->fd_branch = 0; /* branch for error */
6375 fi->fd_status = fi->fd_rescount = 0;
6376
6377 		/* channel and tag are unchanged */
6378 *fi_data = OHCI_ITHEADER_VAL(TAG, tag) |
6379 OHCI_ITHEADER_VAL(CHAN, ch) |
6380 OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
6381 *++fi_data = 0;
6382 *++fi_data = 0;
6383 *++fi_data = 0;
6384
6385 fl->fd_flags = OHCI_DESC_OUTPUT | OHCI_DESC_LAST |
6386 OHCI_DESC_BRANCH;
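		/*
		 * The branch points at the next packet's OUTPUT_IMMEDIATE
		 * group; the low bits carry Z = 3, the number of 16-byte
		 * descriptors in that group.
		 */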
6387 fl->fd_branch =
6388 (phys + sizeof(struct fwohci_desc)*(npkt + 1)*3) | 0x03;
6389 fl->fd_status = fl->fd_rescount = 0;
6390
6391 #ifdef FW_DEBUG
6392 if (npkt > itd->itd_maxpacket - 3) {
6393 DPRINTF(("%s: %3d fi fl fl branch %p %p 0x%x\n",
6394 xname, npkt, fi, fl, (int)fl->fd_branch));
6395 }
6396 #endif
6397
6398 		/* physical address of this packet's data buffer */
6399 fl->fd_data =
6400 (u_int32_t)((itd->itd_seg[bufno].ds_addr + bufused));
6401 bufused += itd->itd_maxsize;
6402 if (bufused > itd->itd_seg[bufno].ds_len) {
6403 bufused = 0;
6404 if (++bufno == itd->itd_nsegs) {
6405 				/* ran out of data buffer space */
6406 break;
6407 }
6408 }
6409 }
6410
6411 #if 0
6412 if (itd->itd_num == 0) {
6413 u_int32_t *fdp;
6414 u_int32_t adr;
6415 int i = 0;
6416
6417 fdp = (u_int32_t *)itd->itd_desc;
6418 adr = (u_int32_t)itd->itd_desc_phys; /* XXX */
6419
6420 printf("fwohci_itd_construct: audit DMA desc chain. %d\n",
6421 itd->itd_maxpacket);
6422 for (i = 0; i < itd->itd_maxpacket*12 + 4; ++i) {
6423 if (i % 4 == 0) {
6424 printf("\n%x:", adr + 4*i);
6425 }
6426 printf(" %08x", fdp[i]);
6427 }
6428 printf("\n");
6429
6430 }
6431 #endif
6432 /* last branch should be 0 */
6433 --fd;
6434 fd->fd_branch = 0;
6435
6436 printf("%s: pkt %d %d maxdesc %p\n",
6437 xname, npkt, itd->itd_maxpacket, descend);
6438
6439 return 0;
6440 }
6441
6442 void
6443 fwohci_itd_destruct(struct fwohci_it_dmabuf *itd)
6444 {
6445 const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6446
6447 printf("%s: fwohci_itd_destruct %d\n", xname, itd->itd_num);
6448
6449 fwohci_itd_dmabuf_free(itd);
6450 }
6451
6452
6453 /*
6454 * static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
6455 *
6456 * This function allocates DMA memory for fwohci_it_dmabuf. This
6457  *	function returns 0 when it succeeds and a non-zero value when
6458  *	it fails.
6459 */
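/*
 * Sizing example (hypothetical numbers): for 488-byte DV packets
 * itd_maxsize is rounded up to 512 by fwohci_itd_construct(), so a
 * buffer holding 255 packets asks bus_dmamem_alloc() for
 * 512 * 255 = 130560 bytes, spread over at most FWOHCI_MAX_ITDATASEG
 * segments.
 */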
6460 static int
6461 fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
6462 {
6463 const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6464 bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;
6465
6466 int dmasize = itd->itd_maxsize * itd->itd_maxpacket;
6467 int error;
6468
6469 DPRINTF(("%s: fwohci_itd_dmabuf_alloc[%d] dmasize %d maxpkt %d\n",
6470 xname, itd->itd_num, dmasize, itd->itd_maxpacket));
6471
6472 if ((error = bus_dmamem_alloc(dmat, dmasize, PAGE_SIZE, 0,
6473 itd->itd_seg, FWOHCI_MAX_ITDATASEG, &itd->itd_nsegs, 0)) != 0) {
6474 printf("%s: unable to allocate data buffer, error = %d\n",
6475 xname, error);
6476 goto fail_0;
6477 }
6478
6479 /* checking memory range */
6480 #ifdef FW_DEBUG
6481 {
6482 int loop;
6483
6484 for (loop = 0; loop < itd->itd_nsegs; ++loop) {
6485 DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
6486 (long)itd->itd_seg[loop].ds_addr,
6487 (long)itd->itd_seg[loop].ds_addr
6488 + (long)itd->itd_seg[loop].ds_len - 1));
6489 }
6490 }
6491 #endif
6492
6493 if ((error = bus_dmamem_map(dmat, itd->itd_seg, itd->itd_nsegs,
6494 dmasize, (caddr_t *)&itd->itd_buf,
6495 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
6496 printf("%s: unable to map data buffer, error = %d\n",
6497 xname, error);
6498 goto fail_1;
6499 }
6500
6501 	DPRINTF(("fwohci_itd_dmabuf_alloc[%d]: bus_dmamem_map addr %p\n",
6502 itd->itd_num, itd->itd_buf));
6503
6504 if ((error = bus_dmamap_create(dmat, /*chunklen*/dmasize,
6505 itd->itd_nsegs, dmasize, 0, BUS_DMA_WAITOK,
6506 &itd->itd_dmamap)) != 0) {
6507 printf("%s: unable to create data buffer DMA map, "
6508 "error = %d\n", xname, error);
6509 goto fail_2;
6510 }
6511
6512 	DPRINTF(("fwohci_itd_dmabuf_alloc: bus_dmamap_create\n"));
6513
6514 if ((error = bus_dmamap_load(dmat, itd->itd_dmamap,
6515 itd->itd_buf, dmasize, NULL, BUS_DMA_WAITOK)) != 0) {
6516 printf("%s: unable to load data buffer DMA map, error = %d\n",
6517 xname, error);
6518 goto fail_3;
6519 }
6520
6521 DPRINTF(("fwohci_itd_dmabuf_alloc: load DMA memory vm %p\n",
6522 itd->itd_buf));
6523 DPRINTF(("\tmapsize %ld nsegs %d\n",
6524 (long)itd->itd_dmamap->dm_mapsize, itd->itd_dmamap->dm_nsegs));
6525
6526 #ifdef FW_DEBUG
6527 {
6528 int loop;
6529
6530 for (loop = 0; loop < itd->itd_dmamap->dm_nsegs; ++loop) {
6531 DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
6532 (long)itd->itd_dmamap->dm_segs[loop].ds_addr,
6533 (long)itd->itd_dmamap->dm_segs[loop].ds_addr +
6534 (long)itd->itd_dmamap->dm_segs[loop].ds_len - 1));
6535 }
6536 }
6537 #endif
6538
6539 return 0;
6540
6541 fail_3:
6542 bus_dmamap_destroy(dmat, itd->itd_dmamap);
6543 fail_2:
6544 bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
6545 fail_1:
6546 bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);
6547 fail_0:
6548 itd->itd_nsegs = 0;
6549 itd->itd_maxpacket = 0;
6550 return error;
6551 }
6552
6553 /*
6554 * static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
6555 *
6556  *	This function releases the memory resources allocated by
6557 * fwohci_itd_dmabuf_alloc().
6558 */
6559 static void
6560 fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
6561 {
6562 bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;
6563 int dmasize = itd->itd_maxsize * itd->itd_maxpacket;
6564
6565 bus_dmamap_destroy(dmat, itd->itd_dmamap);
6566 bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
6567 bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);
6568
6569 itd->itd_nsegs = 0;
6570 itd->itd_maxpacket = 0;
6571 }
6572
6573
6574
6575 /*
6576 * int fwohci_itd_link(struct fwohci_it_dmabuf *itd,
6577 * struct fwohci_it_dmabuf *itdc)
6578 *
6579  *	This function concatenates the descriptor chains of dmabufs
6580  *	itd and itdc.  The descriptor chain in itdc follows the one in
6581  *	itd.  It moves the interrupt flag from the last packet of itd
6582  *	to the first packet of itdc.
6583  *
6584  *	This function returns 0 when it succeeds.  If an error
6585  *	happens, it returns a negative value.
6586 */
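/*
 * Sketch of the splice performed below:
 *
 *	itd  lastdesc:           INTR cleared, branch set to itdc_phys | 4
 *	itdc first OUTPUT_LAST:  INTR set
 *	itdc lastdesc:           INTR set, branch saved in itd_savedbranch
 *	                         and then zeroed
 */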
6587 int
6588 fwohci_itd_link(struct fwohci_it_dmabuf *itd, struct fwohci_it_dmabuf *itdc)
6589 {
6590 struct fwohci_desc *fd1, *fdc;
6591
6592 if (itdc->itd_lastdesc == itdc->itd_desc) {
6593 /* no valid data */
6594 printf("fwohci_itd_link: no data\n");
6595 return -1;
6596 }
6597
6598 if (itdc->itd_flags & ITD_FLAGS_LOCK) {
6599 /* used already */
6600 printf("fwohci_itd_link: link locked\n");
6601 return -1;
6602 }
6603 itdc->itd_flags |= ITD_FLAGS_LOCK;
6604 /* for the first one */
6605 itd->itd_flags |= ITD_FLAGS_LOCK;
6606
6607 DPRINTF(("linking %d after %d: add %d pkts\n",
6608 itdc->itd_num, itd->itd_num, itdc->itd_npacket));
6609
6610 /* XXX: should sync cache */
6611
6612 fd1 = itd->itd_lastdesc;
6613 fdc = itdc->itd_desc + 3; /* OUTPUT_LAST in the first descriptor */
6614
6615 /* sanity check */
6616 #define OUTPUT_LAST_DESC (OHCI_DESC_OUTPUT | OHCI_DESC_LAST | OHCI_DESC_BRANCH)
6617 if ((fd1->fd_flags & OUTPUT_LAST_DESC) != OUTPUT_LAST_DESC) {
6618 printf("funny! not OUTPUT_LAST descriptor %p\n", fd1);
6619 }
6620 if (itd->itd_lastdesc - itd->itd_desc != 3 * itd->itd_npacket) {
6621 printf("funny! packet number inconsistency %ld <=> %ld\n",
6622 (long)(itd->itd_lastdesc - itd->itd_desc),
6623 (long)(3*itd->itd_npacket));
6624 }
6625
6626 fd1->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6627 fdc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
6628 fd1->fd_branch = itdc->itd_desc_phys | 4;
6629
6630 itdc->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
6631 	/* save the branch address of lastdesc and replace it with 0 */
6632 itdc->itd_savedbranch = itdc->itd_lastdesc->fd_branch;
6633 itdc->itd_lastdesc->fd_branch = 0;
6634
6635 DPRINTF(("%s: link (%d %d), add pkt %d/%d branch 0x%x next saved 0x%x\n",
6636 itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
6637 itd->itd_num, itdc->itd_num,
6638 itdc->itd_npacket, itdc->itd_maxpacket,
6639 (int)fd1->fd_branch, (int)itdc->itd_savedbranch));
6640
6641 /* XXX: should sync cache */
6642
6643 return 0;
6644 }
6645
6646
6647 /*
6648 * int fwohci_itd_unlink(struct fwohci_it_dmabuf *itd)
6649 *
6650  *	This function unlinks the dmabuf given as the argument from the
6651  *	chain of valid descriptors, restoring its saved branch address
6652  *	and clearing its interrupt flags.
6653 */
6654 int
6655 fwohci_itd_unlink(struct fwohci_it_dmabuf *itd)
6656 {
6657 struct fwohci_desc *fd;
6658
6659 /* XXX: should sync cache */
6660
6661 fd = itd->itd_lastdesc;
6662
6663 fd->fd_branch = itd->itd_savedbranch;
6664 DPRINTF(("%s: unlink buf %d branch restored 0x%x\n",
6665 itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
6666 itd->itd_num, (int)fd->fd_branch));
6667
6668 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6669 itd->itd_lastdesc = itd->itd_desc;
6670
6671 fd = itd->itd_desc + 3; /* 1st OUTPUT_LAST */
6672 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6673
6674 /* XXX: should sync cache */
6675
6676 itd->itd_npacket = 0;
6677 itd->itd_lastdesc = itd->itd_desc;
6678 itd->itd_flags &= ~ITD_FLAGS_LOCK;
6679
6680 return 0;
6681 }
6682
6683
6684 /*
6685 * static int fwohci_itd_writedata(struct fwohci_it_dmabuf *, int ndata,
6686 * struct ieee1394_it_datalist *);
6687 *
6688  *	This function returns the number of packets written, or a
6689  *	negative value if an error happens.
6690 */
6691 int
6692 fwohci_itd_writedata(struct fwohci_it_dmabuf *itd, int ndata,
6693 struct ieee1394_it_datalist *itdata)
6694 {
6695 int writepkt;
6696 int i;
6697 u_int8_t *p;
6698 struct fwohci_desc *fd;
6699 u_int32_t *fd_idata;
6700 const int dspace =
6701 itd->itd_maxpacket - itd->itd_npacket < ndata ?
6702 itd->itd_maxpacket - itd->itd_npacket : ndata;
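	/*
	 * dspace is the number of packets that still fit in this buffer
	 * (free descriptor groups), clipped to ndata.
	 */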
6703
6704 if (itd->itd_flags & ITD_FLAGS_LOCK || dspace == 0) {
6705 		/* locked or already full: nothing can be written */
6706 if (itd->itd_flags & ITD_FLAGS_LOCK) {
6707 DPRINTF(("fwohci_itd_writedata: buf %d lock flag %s,"
6708 " dspace %d\n",
6709 itd->itd_num,
6710 itd->itd_flags & ITD_FLAGS_LOCK ? "ON" : "OFF",
6711 dspace));
6712 return 0; /* not an error */
6713 }
6714 }
6715
6716 /* sanity check */
6717 if (itd->itd_maxpacket < itd->itd_npacket) {
6718 printf("fwohci_itd_writedata: funny! # pkt > maxpkt"
6719 "%d %d\n", itd->itd_npacket, itd->itd_maxpacket);
6720 }
6721
6722 p = itd->itd_buf + itd->itd_maxsize * itd->itd_npacket;
6723 fd = itd->itd_lastdesc;
6724
6725 DPRINTF(("fwohci_itd_writedata(%d[%p], %d, 0x%p) invoked:\n",
6726 itd->itd_num, itd, ndata, itdata));
6727
6728 for (writepkt = 0; writepkt < dspace; ++writepkt) {
6729 u_int8_t *p1 = p;
6730 int cpysize;
6731 int totalsize = 0;
6732
6733 DPRINTF(("writing %d ", writepkt));
6734
6735 for (i = 0; i < 4; ++i) {
6736 switch (itdata->it_cmd[i]&IEEE1394_IT_CMD_MASK) {
6737 case IEEE1394_IT_CMD_IMMED:
6738 memcpy(p1, &itdata->it_u[i].id_data, 8);
6739 p1 += 8;
6740 totalsize += 8;
6741 break;
6742 case IEEE1394_IT_CMD_PTR:
6743 cpysize = itdata->it_cmd[i]&IEEE1394_IT_CMD_SIZE;
6744 DPRINTF(("fwohci_itd_writedata: cpy %d %p\n",
6745 cpysize, itdata->it_u[i].id_addr));
6746 if (totalsize + cpysize > itd->itd_maxsize) {
6747 				/* error: packet would exceed itd_maxsize */
6748 break;
6749 }
6750 memcpy(p1, itdata->it_u[i].id_addr, cpysize);
6751 totalsize += cpysize;
6752 break;
6753 case IEEE1394_IT_CMD_NOP:
6754 break;
6755 default:
6756 /* unknown command */
6757 break;
6758 }
6759 }
6760
6761 /* only for DV test */
6762 if (totalsize != 488) {
6763 printf("error: totalsize %d at %d\n",
6764 totalsize, writepkt);
6765 }
6766
6767 DPRINTF(("totalsize %d ", totalsize));
6768
6769 /* fill iso command in OUTPUT_IMMED descriptor */
6770
6771 /* XXX: sync cache */
6772 		fd += 2;	/* skip to this packet's immediate header quadlets */
6773 fd_idata = (u_int32_t *)fd;
6774
6775 /*
6776 * Umm, should tag, channel and tcode be written
6777 * previously in itd_construct?
6778 */
6779 #if 0
6780 *fd_idata = OHCI_ITHEADER_VAL(TAG, tag) |
6781 OHCI_ITHEADER_VAL(CHAN, ch) |
6782 OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
6783 #endif
6784 *++fd_idata = totalsize << 16;
6785
6786 /* fill data in OUTPUT_LAST descriptor */
6787 ++fd;
6788 /* intr check... */
6789 if (fd->fd_flags & OHCI_DESC_INTR_ALWAYS) {
6790 printf("uncleared INTR flag in desc %ld\n",
6791 (long)(fd - itd->itd_desc - 1)/3);
6792 }
6793 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6794
6795 if ((fd - itd->itd_desc - 1)/3 != itd->itd_maxpacket - 1) {
6796 u_int32_t bcal;
6797
6798 bcal = (fd - itd->itd_desc + 1)*sizeof(struct fwohci_desc) + (u_int32_t)itd->itd_desc_phys;
6799 if (bcal != (fd->fd_branch & 0xfffffff0)) {
6800
6801 printf("uum, branch differ at %d, %x %x %ld/%d\n",
6802 itd->itd_num,
6803 bcal,
6804 fd->fd_branch,
6805 (long)((fd - itd->itd_desc - 1)/3),
6806 itd->itd_maxpacket);
6807 }
6808 } else {
6809 			/* the last packet */
6810 if (fd->fd_branch != 0) {
6811 printf("uum, branch differ at %d, %x %x %ld/%d\n",
6812 itd->itd_num,
6813 0,
6814 fd->fd_branch,
6815 (long)((fd - itd->itd_desc - 1)/3),
6816 itd->itd_maxpacket);
6817 }
6818 }
6819
6820 /* sanity check */
6821 if (fd->fd_flags != OUTPUT_LAST_DESC) {
6822 printf("fwohci_itd_writedata: dmabuf %d desc inconsistent %d\n",
6823 itd->itd_num, writepkt + itd->itd_npacket);
6824 break;
6825 }
6826 fd->fd_reqcount = totalsize;
6827 /* XXX: sync cache */
6828
6829 ++itdata;
6830 p += itd->itd_maxsize;
6831 }
6832
6833 DPRINTF(("loop start %d, %d times %d\n",
6834 itd->itd_npacket, dspace, writepkt));
6835
6836 itd->itd_npacket += writepkt;
6837 itd->itd_lastdesc = fd;
6838
6839 return writepkt;
6840 }
6841
6842
6843
6844
6845
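/*
 * int fwohci_itd_isfilled(struct fwohci_it_dmabuf *itd)
 *
 *	Returns non-zero when more than half of the buffer's packet slots
 *	are in use.
 */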
6846 int
6847 fwohci_itd_isfilled(struct fwohci_it_dmabuf *itd)
6848 {
6849
6850 return itd->itd_npacket*2 > itd->itd_maxpacket ? 1 : 0;
6851 }
6852