1 /* $NetBSD: fwohci.c,v 1.16.2.14 2002/12/19 00:48:06 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas of 3am Software Foundry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * IEEE1394 Open Host Controller Interface
41 * based on OHCI Specification 1.1 (January 6, 2000)
42 * The first version to support the network interface part was written
43 * by Atsushi Onoe <onoe (at) netbsd.org>.
44 */
45
46 /*
47 * The first version to support the isochronous acquisition part was written
48 * by HAYAKAWA Koichi <haya (at) netbsd.org>.
49 */
50
51 #include <sys/cdefs.h>
52 __KERNEL_RCSID(0, "$NetBSD: fwohci.c,v 1.16.2.14 2002/12/19 00:48:06 thorpej Exp $");
53
54 #define FWOHCI_WAIT_DEBUG 1
55
56 #define FWOHCI_IT_BUFNUM 4
57
58 #include "opt_inet.h"
59 #include "fwiso.h"
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/kthread.h>
64 #include <sys/socket.h>
65 #include <sys/callout.h>
66 #include <sys/device.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
69 #include <sys/mbuf.h>
70 #include <sys/poll.h>
71 #include <sys/select.h>
72
73 #if __NetBSD_Version__ >= 105010000
74 #include <uvm/uvm_extern.h>
75 #else
76 #include <vm/vm.h>
77 #endif
78
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81
82 #include <dev/ieee1394/ieee1394reg.h>
83 #include <dev/ieee1394/fwohcireg.h>
84
85 #include <dev/ieee1394/ieee1394var.h>
86 #include <dev/ieee1394/fwohcivar.h>
87 #include <dev/ieee1394/fwisovar.h>
88
89 static const char * const ieee1394_speeds[] = { IEEE1394_SPD_STRINGS };
90
91 #if 0
92 static int fwohci_dnamem_alloc(struct fwohci_softc *sc, int size,
93 int alignment, bus_dmamap_t *mapp, caddr_t *kvap, int flags);
94 #endif
95 static void fwohci_create_event_thread(void *);
96 static void fwohci_thread_init(void *);
97
98 static void fwohci_event_thread(struct fwohci_softc *);
99 static void fwohci_hw_init(struct fwohci_softc *);
100 static void fwohci_power(int, void *);
101 static void fwohci_shutdown(void *);
102
103 static int fwohci_desc_alloc(struct fwohci_softc *);
104 static struct fwohci_desc *fwohci_desc_get(struct fwohci_softc *, int);
105 static void fwohci_desc_put(struct fwohci_softc *, struct fwohci_desc *, int);
106
107 static int fwohci_ctx_alloc(struct fwohci_softc *, struct fwohci_ctx **,
108 int, int, int);
109 static void fwohci_ctx_free(struct fwohci_softc *, struct fwohci_ctx *);
110 static void fwohci_ctx_init(struct fwohci_softc *, struct fwohci_ctx *);
111
112 static int fwohci_misc_dmabuf_alloc(bus_dma_tag_t, int, int,
113 bus_dma_segment_t *, bus_dmamap_t *, void **, const char *);
114 static void fwohci_misc_dmabuf_free(bus_dma_tag_t, int, int,
115 bus_dma_segment_t *, bus_dmamap_t *, caddr_t);
116
117 static struct fwohci_ir_ctx *fwohci_ir_ctx_construct(struct fwohci_softc *,
118 int, int, int, int, int, int);
119 static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *);
120
121 static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *);
122 static int fwohci_ir_init(struct fwohci_ir_ctx *);
123 static int fwohci_ir_start(struct fwohci_ir_ctx *);
124 static void fwohci_ir_intr(struct fwohci_softc *, struct fwohci_ir_ctx *);
125 static int fwohci_ir_stop(struct fwohci_ir_ctx *);
126 static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *);
127 #ifdef USEDRAIN
128 static int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *);
129 #endif /* USEDRAIN */
130
131 static int fwohci_it_desc_alloc(struct fwohci_it_ctx *);
132 static void fwohci_it_desc_free(struct fwohci_it_ctx *itc);
133 struct fwohci_it_ctx *fwohci_it_ctx_construct(struct fwohci_softc *,
134 int, int, int, int);
135 void fwohci_it_ctx_destruct(struct fwohci_it_ctx *);
136 int fwohci_it_ctx_writedata(ieee1394_it_tag_t, int,
137 struct ieee1394_it_datalist *, int);
138 static void fwohci_it_ctx_run(struct fwohci_it_ctx *);
139 int fwohci_it_ctx_flush(ieee1394_it_tag_t);
140 static void fwohci_it_intr(struct fwohci_softc *, struct fwohci_it_ctx *);
141
142 int fwohci_itd_construct(struct fwohci_it_ctx *, struct fwohci_it_dmabuf *,
143 int, struct fwohci_desc *, bus_addr_t, int, int, paddr_t);
144 void fwohci_itd_destruct(struct fwohci_it_dmabuf *);
145 static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *);
146 static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *);
147 int fwohci_itd_link(struct fwohci_it_dmabuf *, struct fwohci_it_dmabuf *);
148 int fwohci_itd_unlink(struct fwohci_it_dmabuf *);
149 int fwohci_itd_writedata(struct fwohci_it_dmabuf *, int,
150 struct ieee1394_it_datalist *);
151 int fwohci_itd_isfilled(struct fwohci_it_dmabuf *);
152
153 static int fwohci_buf_alloc(struct fwohci_softc *, struct fwohci_buf *);
154 static void fwohci_buf_free(struct fwohci_softc *, struct fwohci_buf *);
155 static void fwohci_buf_init_rx(struct fwohci_softc *);
156 static void fwohci_buf_start_rx(struct fwohci_softc *);
157 static void fwohci_buf_stop_tx(struct fwohci_softc *);
158 static void fwohci_buf_stop_rx(struct fwohci_softc *);
159 static void fwohci_buf_next(struct fwohci_softc *, struct fwohci_ctx *);
160 static int fwohci_buf_pktget(struct fwohci_softc *, struct fwohci_buf **,
161 caddr_t *, int);
162 static int fwohci_buf_input(struct fwohci_softc *, struct fwohci_ctx *,
163 struct fwohci_pkt *);
164 static int fwohci_buf_input_ppb(struct fwohci_softc *, struct fwohci_ctx *,
165 struct fwohci_pkt *);
166
167 static u_int8_t fwohci_phy_read(struct fwohci_softc *, u_int8_t);
168 static void fwohci_phy_write(struct fwohci_softc *, u_int8_t, u_int8_t);
169 static void fwohci_phy_busreset(struct fwohci_softc *);
170 static void fwohci_phy_input(struct fwohci_softc *, struct fwohci_pkt *);
171
172 static int fwohci_handler_set(struct fwohci_softc *, int, u_int32_t, u_int32_t,
173 u_int32_t, int (*)(struct fwohci_softc *, void *, struct fwohci_pkt *),
174 void *);
175
176 ieee1394_ir_tag_t fwohci_ir_ctx_set(struct device *, int, int, int, int, int);
177 int fwohci_ir_ctx_clear(struct device *, ieee1394_ir_tag_t);
178 int fwohci_ir_read(struct device *, ieee1394_ir_tag_t, struct uio *,
179 int, int);
180 int fwohci_ir_wait(struct device *, ieee1394_ir_tag_t, void *, char *name);
181 int fwohci_ir_select(struct device *, ieee1394_ir_tag_t, struct proc *);
182
185 ieee1394_it_tag_t fwohci_it_set(struct ieee1394_softc *, int, int);
186 static ieee1394_it_tag_t fwohci_it_ctx_set(struct fwohci_softc *, int, int, int);
187 int fwohci_it_ctx_clear(ieee1394_it_tag_t *);
188
189 static void fwohci_arrq_input(struct fwohci_softc *, struct fwohci_ctx *);
190 static void fwohci_arrs_input(struct fwohci_softc *, struct fwohci_ctx *);
191 static void fwohci_as_input(struct fwohci_softc *, struct fwohci_ctx *);
192
193 static int fwohci_at_output(struct fwohci_softc *, struct fwohci_ctx *,
194 struct fwohci_pkt *);
195 static void fwohci_at_done(struct fwohci_softc *, struct fwohci_ctx *, int);
196 static void fwohci_atrs_output(struct fwohci_softc *, int, struct fwohci_pkt *,
197 struct fwohci_pkt *);
198
199 static int fwohci_guidrom_init(struct fwohci_softc *);
200 static void fwohci_configrom_init(struct fwohci_softc *);
201 static int fwohci_configrom_input(struct fwohci_softc *, void *,
202 struct fwohci_pkt *);
203 static void fwohci_selfid_init(struct fwohci_softc *);
204 static int fwohci_selfid_input(struct fwohci_softc *);
205
206 static void fwohci_csr_init(struct fwohci_softc *);
207 static int fwohci_csr_input(struct fwohci_softc *, void *,
208 struct fwohci_pkt *);
209
210 static void fwohci_uid_collect(struct fwohci_softc *);
211 static void fwohci_uid_req(struct fwohci_softc *, int);
212 static int fwohci_uid_input(struct fwohci_softc *, void *,
213 struct fwohci_pkt *);
214 static int fwohci_uid_lookup(struct fwohci_softc *, const u_int8_t *);
215 static void fwohci_check_nodes(struct fwohci_softc *);
216
217 static int fwohci_if_inreg(struct device *, u_int32_t, u_int32_t,
218 void (*)(struct device *, struct mbuf *));
219 static int fwohci_if_input(struct fwohci_softc *, void *, struct fwohci_pkt *);
220 static int fwohci_if_input_iso(struct fwohci_softc *, void *, struct fwohci_pkt *);
221
222 static int fwohci_if_output(struct device *, struct mbuf *,
223 void (*)(struct device *, struct mbuf *));
224 static int fwohci_if_setiso(struct device *, u_int32_t, u_int32_t, u_int32_t,
225 void (*)(struct device *, struct mbuf *));
226 static int fwohci_read(struct ieee1394_abuf *);
227 static int fwohci_write(struct ieee1394_abuf *);
228 static int fwohci_read_resp(struct fwohci_softc *, void *, struct fwohci_pkt *);
229 static int fwohci_write_ack(struct fwohci_softc *, void *, struct fwohci_pkt *);
230 static int fwohci_read_multi_resp(struct fwohci_softc *, void *,
231 struct fwohci_pkt *);
232 static int fwohci_inreg(struct ieee1394_abuf *, int);
233 static int fwohci_unreg(struct ieee1394_abuf *, int);
234 static int fwohci_parse_input(struct fwohci_softc *, void *,
235 struct fwohci_pkt *);
236 static int fwohci_submatch(struct device *, struct cfdata *, void *);
237
238 /* XXX */
239 u_int16_t fwohci_cycletimer(struct fwohci_softc *);
240 u_int16_t fwohci_it_cycletimer(ieee1394_it_tag_t);
241
242 #ifdef FW_DEBUG
243 static void fwohci_show_intr(struct fwohci_softc *, u_int32_t);
244 static void fwohci_show_phypkt(struct fwohci_softc *, u_int32_t);
245
246 /* 1 is normal debug, 2 is verbose debug, 3 is complete (packet dumps). */
247
248 #define DPRINTF(x) if (fwdebug) printf x
249 #define DPRINTFN(n,x) if (fwdebug>(n)) printf x
250 int fwdebug = 1;
251 #else
252 #define DPRINTF(x)
253 #define DPRINTFN(n,x)
254 #endif
255
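/*
 * Field layout of the first quadlet of an OHCI isochronous transmit
 * header (speed, tag, channel, tcode, sy).  OHCI_ITHEADER_VAL()
 * shifts a value into the named field and masks off any overflow.
 */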
256 #define OHCI_ITHEADER_SPD_MASK 0x00070000
257 #define OHCI_ITHEADER_SPD_BITPOS 16
258 #define OHCI_ITHEADER_TAG_MASK 0x0000c000
259 #define OHCI_ITHEADER_TAG_BITPOS 14
260 #define OHCI_ITHEADER_CHAN_MASK 0x00003f00
261 #define OHCI_ITHEADER_CHAN_BITPOS 8
262 #define OHCI_ITHEADER_TCODE_MASK 0x000000f0
263 #define OHCI_ITHEADER_TCODE_BITPOS 4
264 #define OHCI_ITHEADER_SY_MASK 0x0000000f
265 #define OHCI_ITHEADER_SY_BITPOS 0
266
267 #define OHCI_ITHEADER_VAL(fld, val) \
268 (OHCI_ITHEADER_##fld##_MASK & ((val) << OHCI_ITHEADER_##fld##_BITPOS))
269
270 int
271 fwohci_init(struct fwohci_softc *sc, const struct evcnt *ev)
272 {
273 int i;
274 u_int32_t val;
275 #if 0
276 int error;
277 #endif
278
279 evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ev,
280 sc->sc_sc1394.sc1394_dev.dv_xname, "intr");
281
282 evcnt_attach_dynamic(&sc->sc_isocnt, EVCNT_TYPE_MISC, ev,
283 sc->sc_sc1394.sc1394_dev.dv_xname, "isorcvs");
284 evcnt_attach_dynamic(&sc->sc_ascnt, EVCNT_TYPE_MISC, ev,
285 sc->sc_sc1394.sc1394_dev.dv_xname, "asrcvs");
286 evcnt_attach_dynamic(&sc->sc_itintrcnt, EVCNT_TYPE_INTR, ev,
287 sc->sc_sc1394.sc1394_dev.dv_xname, "itintr");
288
289 /*
290 * Wait for reset completion
291 */
292 for (i = 0; i < OHCI_LOOP; i++) {
293 val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear);
294 if ((val & OHCI_HCControl_SoftReset) == 0)
295 break;
296 DELAY(10);
297 }
298
299 /* What dialect of OHCI is this device?
300 */
301 val = OHCI_CSR_READ(sc, OHCI_REG_Version);
302 printf("%s: OHCI %u.%u", sc->sc_sc1394.sc1394_dev.dv_xname,
303 OHCI_Version_GET_Version(val), OHCI_Version_GET_Revision(val));
304
305 LIST_INIT(&sc->sc_nodelist);
306
307 if (fwohci_guidrom_init(sc) != 0) {
308 printf("\n%s: fatal: no global UID ROM\n",
309 sc->sc_sc1394.sc1394_dev.dv_xname);
310 return -1;
311 }
312
313 printf(", %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
314 sc->sc_sc1394.sc1394_guid[0], sc->sc_sc1394.sc1394_guid[1],
315 sc->sc_sc1394.sc1394_guid[2], sc->sc_sc1394.sc1394_guid[3],
316 sc->sc_sc1394.sc1394_guid[4], sc->sc_sc1394.sc1394_guid[5],
317 sc->sc_sc1394.sc1394_guid[6], sc->sc_sc1394.sc1394_guid[7]);
318
319 /* Get the maximum link speed and receive size
320 */
321 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
322 sc->sc_sc1394.sc1394_link_speed =
323 OHCI_BITVAL(val, OHCI_BusOptions_LinkSpd);
324 if (sc->sc_sc1394.sc1394_link_speed < IEEE1394_SPD_MAX) {
325 printf(", %s",
326 ieee1394_speeds[sc->sc_sc1394.sc1394_link_speed]);
327 } else {
328 printf(", unknown speed %u", sc->sc_sc1394.sc1394_link_speed);
329 }
330
331 /* MaxRec is encoded as log2(max_rec_octets)-1
332 */
333 sc->sc_sc1394.sc1394_max_receive =
334 1 << (OHCI_BITVAL(val, OHCI_BusOptions_MaxRec) + 1);
335 printf(", %u max_rec", sc->sc_sc1394.sc1394_max_receive);
336
337 /*
338 * Count how many isochronous receive ctx we have.
339 */
340 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0);
341 val = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntMaskClear);
342 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskClear, ~0);
343 for (i = 0; val != 0; val >>= 1) {
344 if (val & 0x1)
345 i++;
346 }
347 sc->sc_isoctx = i;
348 printf(", %d ir_ctx", sc->sc_isoctx);
349
350 /*
351 * Count how many isochronous transmit ctx we have.
352 */
353 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
354 val = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntMaskClear);
355 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskClear, ~0);
356 for (i = 0; val != 0; val >>= 1) {
357 if (val & 0x1) {
358 i++;
359 OHCI_SYNC_TX_DMA_WRITE(sc, i, OHCI_SUBREG_CommandPtr, 0);
360 }
361 }
362 sc->sc_itctx = i;
363
364 printf(", %d it_ctx", sc->sc_itctx);
365
366 printf("\n");
367
368 #if 0
369 error = fwohci_dnamem_alloc(sc, OHCI_CONFIG_SIZE,
370 OHCI_CONFIG_ALIGNMENT, &sc->sc_configrom_map,
371 (caddr_t *) &sc->sc_configrom, BUS_DMA_WAITOK|BUS_DMA_COHERENT);
372 return error;
373 #endif
374
375 sc->sc_dying = 0;
376 sc->sc_nodeid = 0xffff; /* invalid */
377
378 sc->sc_sc1394.sc1394_callback.sc1394_read = fwohci_read;
379 sc->sc_sc1394.sc1394_callback.sc1394_write = fwohci_write;
380 sc->sc_sc1394.sc1394_callback.sc1394_inreg = fwohci_inreg;
381 sc->sc_sc1394.sc1394_callback.sc1394_unreg = fwohci_unreg;
382
383 kthread_create(fwohci_create_event_thread, sc);
384 return 0;
385 }
386
387 static int
388 fwohci_if_setiso(struct device *self, u_int32_t channel, u_int32_t tag,
389 u_int32_t direction, void (*handler)(struct device *, struct mbuf *))
390 {
391 struct fwohci_softc *sc = (struct fwohci_softc *)self;
392 int retval;
393 int s;
394
395 if (direction == 1) {
396 return EIO;
397 }
398
399 s = splnet();
400 retval = fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA,
401 channel, 1 << tag, 0, fwohci_if_input_iso, handler);
402 splx(s);
403
404 if (!retval) {
405 printf("%s: dummy iso handler set\n",
406 sc->sc_sc1394.sc1394_dev.dv_xname);
407 } else {
408 printf("%s: dummy iso handler cannot set\n",
409 sc->sc_sc1394.sc1394_dev.dv_xname);
410 }
411
412 return retval;
413 }
414
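/*
 * Interrupt handler.  Acknowledges pending interrupt events, services
 * isochronous transmit/receive contexts directly, and records the
 * remaining events in sc_intmask for the event thread, which is woken
 * before returning.  BusReset is left pending (only its interrupt mask
 * bit is cleared) until the SelfID phase completes.
 */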
415 int
416 fwohci_intr(void *arg)
417 {
418 struct fwohci_softc * const sc = arg;
419 int progress = 0;
420 u_int32_t intmask, iso;
421
422 for (;;) {
423 intmask = OHCI_CSR_READ(sc, OHCI_REG_IntEventClear);
424
425 /*
426 * On a bus reset, everything except bus reset gets
427 * cleared. That can't get cleared until the selfid
428 * phase completes (which happens outside the
429 * interrupt routines). So if just a bus reset is left
430 * in the mask and it's already in the sc_intmask,
431 * just return.
432 */
433
434 if ((intmask == 0) ||
435 (progress && (intmask == OHCI_Int_BusReset) &&
436 (sc->sc_intmask & OHCI_Int_BusReset))) {
437 if (progress)
438 wakeup(fwohci_event_thread);
439 return progress;
440 }
441 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
442 intmask & ~OHCI_Int_BusReset);
443 #ifdef FW_DEBUG
444 if (fwdebug > 1)
445 fwohci_show_intr(sc, intmask);
446 #endif
447
448 if (intmask & OHCI_Int_BusReset) {
449 /*
450 * According to OHCI spec 6.1.1 "busReset", all
451 * asynchronous transmit must be stopped before
452 * clearing BusReset.  Moreover, the BusReset
453 * interrupt bit should not be cleared during the
454 * SelfID phase.  Thus we turn off the BusReset bit
455 * in the interrupt mask instead, until SelfID
456 * completion or SelfID timeout.
457 */
458 intmask &= OHCI_Int_SelfIDComplete;
459 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear,
460 OHCI_Int_BusReset);
461 sc->sc_intmask = OHCI_Int_BusReset;
462 }
463 sc->sc_intmask |= intmask;
464
465 if (intmask & OHCI_Int_IsochTx) {
466 int i;
467
468 iso = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear);
469 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntEventClear, iso);
470
471 sc->sc_itintrcnt.ev_count++;
472 for (i = 0; i < sc->sc_itctx; ++i) {
473 if ((iso & (1<<i)) == 0 ||
474 sc->sc_ctx_it[i] == NULL) {
475 continue;
476 }
477
478 fwohci_it_intr(sc, sc->sc_ctx_it[i]);
479 }
480 }
481 if (intmask & OHCI_Int_IsochRx) {
482 int i;
483
484 iso = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear);
485 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear, iso);
486
487 for (i = 0; i < sc->sc_isoctx; i++) {
488 if ((iso & (1 << i))
489 && sc->sc_ctx_ir[i] != NULL) {
490 iso &= ~(1 << i);
491 fwohci_ir_intr(sc, sc->sc_ctx_ir[i]);
492 }
493 }
494
495 if (iso == 0) {
496 sc->sc_intmask &= ~OHCI_Int_IsochRx;
497 }
498 sc->sc_iso |= iso;
499 }
500
501 if (!progress) {
502 sc->sc_intrcnt.ev_count++;
503 progress = 1;
504 }
505 }
506 }
507
508 static void
509 fwohci_create_event_thread(void *arg)
510 {
511 struct fwohci_softc *sc = arg;
512
513 if (kthread_create1(fwohci_thread_init, sc, &sc->sc_event_thread, "%s",
514 sc->sc_sc1394.sc1394_dev.dv_xname)) {
515 printf("%s: unable to create event thread\n",
516 sc->sc_sc1394.sc1394_dev.dv_xname);
517 panic("fwohci_create_event_thread");
518 }
519 }
520
521 static void
522 fwohci_thread_init(void *arg)
523 {
524 struct fwohci_softc *sc = arg;
525 int i;
526
527 /*
528 * Allocate descriptors
529 */
530 if (fwohci_desc_alloc(sc)) {
531 printf("%s: not enabling interrupts\n",
532 sc->sc_sc1394.sc1394_dev.dv_xname);
533 kthread_exit(1);
534 }
535
536 /*
537 * Enable Link Power
538 */
539
540 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS);
541
542 /*
543 * Allocate DMA Context
544 */
545 fwohci_ctx_alloc(sc, &sc->sc_ctx_arrq, OHCI_BUF_ARRQ_CNT,
546 OHCI_CTX_ASYNC_RX_REQUEST, FWOHCI_CTX_ASYNC);
547 fwohci_ctx_alloc(sc, &sc->sc_ctx_arrs, OHCI_BUF_ARRS_CNT,
548 OHCI_CTX_ASYNC_RX_RESPONSE, FWOHCI_CTX_ASYNC);
549 fwohci_ctx_alloc(sc, &sc->sc_ctx_atrq, 0, OHCI_CTX_ASYNC_TX_REQUEST,
550 FWOHCI_CTX_ASYNC);
551 fwohci_ctx_alloc(sc, &sc->sc_ctx_atrs, 0, OHCI_CTX_ASYNC_TX_RESPONSE,
552 FWOHCI_CTX_ASYNC);
553 sc->sc_ctx_as = malloc(sizeof(sc->sc_ctx_as[0]) * sc->sc_isoctx,
554 M_DEVBUF, M_WAITOK);
555 if (sc->sc_ctx_as == NULL) {
556 printf("no asynchronous stream\n");
557 } else {
558 for (i = 0; i < sc->sc_isoctx; i++)
559 sc->sc_ctx_as[i] = NULL;
560 }
561 sc->sc_ctx_ir = malloc(sizeof(sc->sc_ctx_ir[0]) * sc->sc_isoctx,
562 M_DEVBUF, M_WAITOK|M_ZERO);
563 sc->sc_ctx_it = malloc(sizeof(sc->sc_ctx_it[0]) * sc->sc_itctx,
564 M_DEVBUF, M_WAITOK|M_ZERO);
565
566 /*
567 * Allocate buffer for configuration ROM and SelfID buffer
568 */
569 fwohci_buf_alloc(sc, &sc->sc_buf_cnfrom);
570 fwohci_buf_alloc(sc, &sc->sc_buf_selfid);
571
572 callout_init(&sc->sc_selfid_callout);
573
574 sc->sc_sc1394.sc1394_ifinreg = fwohci_if_inreg;
575 sc->sc_sc1394.sc1394_ifoutput = fwohci_if_output;
576 sc->sc_sc1394.sc1394_ifsetiso = fwohci_if_setiso;
577
578 sc->sc_sc1394.sc1394_ir_open = fwohci_ir_ctx_set;
579 sc->sc_sc1394.sc1394_ir_close = fwohci_ir_ctx_clear;
580 sc->sc_sc1394.sc1394_ir_read = fwohci_ir_read;
581 sc->sc_sc1394.sc1394_ir_wait = fwohci_ir_wait;
582 sc->sc_sc1394.sc1394_ir_select = fwohci_ir_select;
583
584 #if 0
585 sc->sc_sc1394.sc1394_it_open = fwohci_it_open;
586 sc->sc_sc1394.sc1394_it_write = fwohci_it_write;
587 sc->sc_sc1394.sc1394_it_close = fwohci_it_close;
588 /* XXX: need fwohci_it_flush? */
589 #endif
590
591 /*
592 * establish hooks for shutdown and suspend/resume
593 */
594 sc->sc_shutdownhook = shutdownhook_establish(fwohci_shutdown, sc);
595 sc->sc_powerhook = powerhook_establish(fwohci_power, sc);
596
597 sc->sc_sc1394.sc1394_if = config_found(&sc->sc_sc1394.sc1394_dev, "fw",
598 fwohci_print);
599
600 #if NFWISO > 0
601 fwiso_register_if(&sc->sc_sc1394);
602 #endif
603
604 /* Main loop.  It does not return under normal operation. */
605
606 fwohci_event_thread(sc);
607
608 kthread_exit(0);
609 }
610
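/*
 * Event thread.  Performs the initial hardware setup and bus reset,
 * then loops on sc_intmask handling the work deferred from the
 * interrupt handler: bus reset cleanup, SelfID processing and node
 * collection, async transmit completion, async receive input and
 * asynchronous stream input.
 */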
611 static void
612 fwohci_event_thread(struct fwohci_softc *sc)
613 {
614 int i, s;
615 u_int32_t intmask, iso;
616
617 s = splbio();
618
619 /*
620 * Initialize hardware registers.
621 */
622
623 fwohci_hw_init(sc);
624
625 /* Initial Bus Reset */
626 fwohci_phy_busreset(sc);
627 splx(s);
628
629 while (!sc->sc_dying) {
630 s = splbio();
631 intmask = sc->sc_intmask;
632 if (intmask == 0) {
633 tsleep(fwohci_event_thread, PZERO, "fwohciev", 0);
634 splx(s);
635 continue;
636 }
637 sc->sc_intmask = 0;
638 splx(s);
639
640 if (intmask & OHCI_Int_BusReset) {
641 fwohci_buf_stop_tx(sc);
642 if (sc->sc_uidtbl != NULL) {
643 free(sc->sc_uidtbl, M_DEVBUF);
644 sc->sc_uidtbl = NULL;
645 }
646
647 callout_reset(&sc->sc_selfid_callout,
648 OHCI_SELFID_TIMEOUT,
649 (void (*)(void *))fwohci_phy_busreset, sc);
650 sc->sc_nodeid = 0xffff; /* indicate invalid */
651 sc->sc_rootid = 0;
652 sc->sc_irmid = IEEE1394_BCAST_PHY_ID;
653 }
654 if (intmask & OHCI_Int_SelfIDComplete) {
655 s = splbio();
656 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
657 OHCI_Int_BusReset);
658 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet,
659 OHCI_Int_BusReset);
660 splx(s);
661 callout_stop(&sc->sc_selfid_callout);
662 if (fwohci_selfid_input(sc) == 0) {
663 fwohci_buf_start_rx(sc);
664 fwohci_uid_collect(sc);
665 }
666 }
667 if (intmask & OHCI_Int_ReqTxComplete)
668 fwohci_at_done(sc, sc->sc_ctx_atrq, 0);
669 if (intmask & OHCI_Int_RespTxComplete)
670 fwohci_at_done(sc, sc->sc_ctx_atrs, 0);
671 if (intmask & OHCI_Int_RQPkt)
672 fwohci_arrq_input(sc, sc->sc_ctx_arrq);
673 if (intmask & OHCI_Int_RSPkt)
674 fwohci_arrs_input(sc, sc->sc_ctx_arrs);
675 if (intmask & OHCI_Int_IsochRx) {
676 if (sc->sc_ctx_as == NULL) {
677 continue;
678 }
679 s = splbio();
680 iso = sc->sc_iso;
681 sc->sc_iso = 0;
682 splx(s);
683 for (i = 0; i < sc->sc_isoctx; i++) {
684 if ((iso & (1 << i)) &&
685 sc->sc_ctx_as[i] != NULL) {
686 fwohci_as_input(sc, sc->sc_ctx_as[i]);
687 sc->sc_ascnt.ev_count++;
688 }
689 }
690 }
691 }
692 }
693
694 #if 0
695 static int
696 fwohci_dnamem_alloc(struct fwohci_softc *sc, int size, int alignment,
697 bus_dmamap_t *mapp, caddr_t *kvap, int flags)
698 {
699 bus_dma_segment_t segs[1];
700 int error, nsegs, steps;
701
702 steps = 0;
703 error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, alignment,
704 segs, 1, &nsegs, flags);
705 if (error)
706 goto cleanup;
707
708 steps = 1;
709 error = bus_dmamem_map(sc->sc_dmat, segs, nsegs, segs[0].ds_len,
710 kvap, flags);
711 if (error)
712 goto cleanup;
713
714 if (error == 0)
715 error = bus_dmamap_create(sc->sc_dmat, size, 1, alignment,
716 size, flags, mapp);
717 if (error)
718 goto cleanup;
719 if (error == 0)
720 error = bus_dmamap_load(sc->sc_dmat, *mapp, *kvap, size, NULL,
721 flags);
722 if (error)
723 goto cleanup;
724
725 cleanup:
726 switch (steps) {
727 case 1:
728 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
729 }
730
731 return error;
732 }
733 #endif
734
735 int
736 fwohci_print(void *aux, const char *pnp)
737 {
738 char *name = aux;
739
740 if (pnp)
741 printf("%s at %s", name, pnp);
742
743 return UNCONF;
744 }
745
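/*
 * (Re)initialize the controller: soft reset, link power, BusOptions,
 * context and link control registers, configuration ROM, SelfID
 * buffer, receive buffers and CSRs, then enable interrupts and the
 * link and start the receivers.  Also used on resume.
 */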
746 static void
747 fwohci_hw_init(struct fwohci_softc *sc)
748 {
749 int i;
750 u_int32_t val;
751
752 /*
753 * Software Reset.
754 */
755 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset);
756 for (i = 0; i < OHCI_LOOP; i++) {
757 val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear);
758 if ((val & OHCI_HCControl_SoftReset) == 0)
759 break;
760 DELAY(10);
761 }
762
763 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS);
764
765 /*
766 * First, initialize CSRs with undefined values to their default settings.
767 */
768 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
769 val |= OHCI_BusOptions_ISC | OHCI_BusOptions_CMC;
770 #if 0
771 val |= OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC;
772 #else
773 val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC);
774 #endif
775 OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val);
776 for (i = 0; i < sc->sc_isoctx; i++) {
777 OHCI_SYNC_RX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
778 ~0);
779 }
780 for (i = 0; i < sc->sc_itctx; i++) {
781 OHCI_SYNC_TX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
782 ~0);
783 }
784 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear, ~0);
785
786 fwohci_configrom_init(sc);
787 fwohci_selfid_init(sc);
788 fwohci_buf_init_rx(sc);
789 fwohci_csr_init(sc);
790
791 /*
792 * Final CSR settings.
793 */
794 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet,
795 OHCI_LinkControl_CycleTimerEnable |
796 OHCI_LinkControl_RcvSelfID | OHCI_LinkControl_RcvPhyPkt);
797
798 OHCI_CSR_WRITE(sc, OHCI_REG_ATRetries, 0x00000888); /*XXX*/
799
800 /* clear receive filter */
801 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiClear, ~0);
802 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoClear, ~0);
803 OHCI_CSR_WRITE(sc, OHCI_REG_AsynchronousRequestFilterHiSet, 0x80000000);
804
805 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear,
806 OHCI_HCControl_NoByteSwapData | OHCI_HCControl_APhyEnhanceEnable);
807 #if BYTE_ORDER == BIG_ENDIAN
808 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet,
809 OHCI_HCControl_NoByteSwapData);
810 #endif
811
812 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, ~0);
813 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset |
814 OHCI_Int_SelfIDComplete | OHCI_Int_IsochRx | OHCI_Int_IsochTx |
815 OHCI_Int_RSPkt | OHCI_Int_RQPkt | OHCI_Int_ARRS | OHCI_Int_ARRQ |
816 OHCI_Int_RespTxComplete | OHCI_Int_ReqTxComplete);
817 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_CycleTooLong |
818 OHCI_Int_UnrecoverableError | OHCI_Int_CycleInconsistent |
819 OHCI_Int_LockRespErr | OHCI_Int_PostedWriteErr);
820 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
821 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0);
822 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_MasterEnable);
823
824 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LinkEnable);
825
826 /*
827 * Start the receivers
828 */
829 fwohci_buf_start_rx(sc);
830 }
831
832 static void
833 fwohci_power(int why, void *arg)
834 {
835 struct fwohci_softc *sc = arg;
836 int s;
837
838 s = splbio();
839 switch (why) {
840 case PWR_SUSPEND:
841 case PWR_STANDBY:
842 fwohci_shutdown(sc);
843 break;
844 case PWR_RESUME:
845 fwohci_hw_init(sc);
846 fwohci_phy_busreset(sc);
847 break;
848 case PWR_SOFTSUSPEND:
849 case PWR_SOFTSTANDBY:
850 case PWR_SOFTRESUME:
851 break;
852 }
853 splx(s);
854 }
855
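/*
 * Quiesce the controller for shutdown or suspend: disable interrupts,
 * stop all transmit and receive DMA, drop the BMC/ISC/CMC/IRMC
 * capability bits from BusOptions, issue a final bus reset and put
 * the link back into soft reset.
 */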
856 static void
857 fwohci_shutdown(void *arg)
858 {
859 struct fwohci_softc *sc = arg;
860 u_int32_t val;
861
862 callout_stop(&sc->sc_selfid_callout);
863 /* disable all interrupts */
864 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, OHCI_Int_MasterEnable);
865 fwohci_buf_stop_tx(sc);
866 fwohci_buf_stop_rx(sc);
867 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
868 val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_ISC |
869 OHCI_BusOptions_CMC | OHCI_BusOptions_IRMC);
870 OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val);
871 fwohci_phy_busreset(sc);
872 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LinkEnable);
873 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LPS);
874 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset);
875 }
876
877 /*
878 * COMMON FUNCTIONS
879 */
880
881 /*
882 * read the PHY Register.
883 */
884 static u_int8_t
885 fwohci_phy_read(struct fwohci_softc *sc, u_int8_t reg)
886 {
887 int i;
888 u_int32_t val;
889
890 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl,
891 OHCI_PhyControl_RdReg | (reg << OHCI_PhyControl_RegAddr_BITPOS));
892 for (i = 0; i < OHCI_LOOP; i++) {
893 if (OHCI_CSR_READ(sc, OHCI_REG_PhyControl) &
894 OHCI_PhyControl_RdDone)
895 break;
896 DELAY(10);
897 }
898 val = OHCI_CSR_READ(sc, OHCI_REG_PhyControl);
899 return (val & OHCI_PhyControl_RdData) >> OHCI_PhyControl_RdData_BITPOS;
900 }
901
902 /*
903 * write the PHY Register.
904 */
905 static void
906 fwohci_phy_write(struct fwohci_softc *sc, u_int8_t reg, u_int8_t val)
907 {
908 int i;
909
910 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl, OHCI_PhyControl_WrReg |
911 (reg << OHCI_PhyControl_RegAddr_BITPOS) |
912 (val << OHCI_PhyControl_WrData_BITPOS));
913 for (i = 0; i < OHCI_LOOP; i++) {
914 if (!(OHCI_CSR_READ(sc, OHCI_REG_PhyControl) &
915 OHCI_PhyControl_WrReg))
916 break;
917 DELAY(10);
918 }
919 }
920
921 /*
922 * Initiate Bus Reset
923 */
924 static void
925 fwohci_phy_busreset(struct fwohci_softc *sc)
926 {
927 int s;
928 u_int8_t val;
929
930 s = splbio();
931 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
932 OHCI_Int_BusReset | OHCI_Int_SelfIDComplete);
933 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset);
934 callout_stop(&sc->sc_selfid_callout);
935 val = fwohci_phy_read(sc, 1);
936 val = (val & 0x80) | /* preserve RHB (force root) */
937 0x40 | /* Initiate Bus Reset */
938 0x3f; /* default GAP count */
939 fwohci_phy_write(sc, 1, val);
940 splx(s);
941 }
942
943 /*
944 * PHY Packet
945 */
946 static void
947 fwohci_phy_input(struct fwohci_softc *sc, struct fwohci_pkt *pkt)
948 {
949 u_int32_t val;
950
951 val = pkt->fp_hdr[1];
952 if (val != ~pkt->fp_hdr[2]) {
953 if (val == 0 && ((*pkt->fp_trail & 0x001f0000) >> 16) ==
954 OHCI_CTXCTL_EVENT_BUS_RESET) {
955 DPRINTFN(1, ("fwohci_phy_input: BusReset: 0x%08x\n",
956 pkt->fp_hdr[2]));
957 } else {
958 printf("%s: phy packet corrupted (0x%08x, 0x%08x)\n",
959 sc->sc_sc1394.sc1394_dev.dv_xname, val,
960 pkt->fp_hdr[2]);
961 }
962 return;
963 }
964 #ifdef FW_DEBUG
965 if (fwdebug > 1)
966 fwohci_show_phypkt(sc, val);
967 #endif
968 }
969
970 /*
971 * Descriptor for context DMA.
972 */
973 static int
974 fwohci_desc_alloc(struct fwohci_softc *sc)
975 {
976 int error, mapsize, dsize;
977
978 /*
979 * allocate descriptor buffer
980 */
981
982 sc->sc_descsize = OHCI_BUF_ARRQ_CNT + OHCI_BUF_ARRS_CNT +
983 OHCI_BUF_ATRQ_CNT + OHCI_BUF_ATRS_CNT +
984 OHCI_BUF_IR_CNT * sc->sc_isoctx + 2;
985 dsize = sizeof(struct fwohci_desc) * sc->sc_descsize;
986 mapsize = howmany(sc->sc_descsize, NBBY);
987 sc->sc_descmap = malloc(mapsize, M_DEVBUF, M_WAITOK|M_ZERO);
988
989 if (sc->sc_descmap == NULL) {
990 printf("fwohci_desc_alloc: cannot get memory\n");
991 return -1;
992 }
993
994 if ((error = bus_dmamem_alloc(sc->sc_dmat, dsize, PAGE_SIZE, 0,
995 &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) {
996 printf("%s: unable to allocate descriptor buffer, error = %d\n",
997 sc->sc_sc1394.sc1394_dev.dv_xname, error);
998 goto fail_0;
999 }
1000
1001 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg,
1002 dsize, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT | BUS_DMA_WAITOK))
1003 != 0) {
1004 printf("%s: unable to map descriptor buffer, error = %d\n",
1005 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1006 goto fail_1;
1007 }
1008
1009 if ((error = bus_dmamap_create(sc->sc_dmat, dsize, sc->sc_dnseg,
1010 dsize, 0, BUS_DMA_WAITOK, &sc->sc_ddmamap)) != 0) {
1011 printf("%s: unable to create descriptor buffer DMA map, "
1012 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error);
1013 goto fail_2;
1014 }
1015
1016 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc,
1017 dsize, NULL, BUS_DMA_WAITOK)) != 0) {
1018 printf("%s: unable to load descriptor buffer DMA map, "
1019 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error);
1020 goto fail_3;
1021 }
1022
1023 return 0;
1024
1025 fail_3:
1026 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1027 fail_2:
1028 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, dsize);
1029 fail_1:
1030 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1031 fail_0:
1032 return error;
1033 }
1034
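/*
 * Find ndesc contiguous free descriptors in the descriptor bitmap,
 * mark them in use and return a pointer to the first one, or NULL if
 * no such run is available.
 */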
1035 static struct fwohci_desc *
1036 fwohci_desc_get(struct fwohci_softc *sc, int ndesc)
1037 {
1038 int i, n;
1039
1040 for (n = 0; n <= sc->sc_descsize - ndesc; n++) {
1041 for (i = 0; ; i++) {
1042 if (i == ndesc) {
1043 for (i = 0; i < ndesc; i++)
1044 setbit(sc->sc_descmap, n + i);
1045 return sc->sc_desc + n;
1046 }
1047 if (isset(sc->sc_descmap, n + i))
1048 break;
1049 }
1050 }
1051 return NULL;
1052 }
1053
1054 static void
1055 fwohci_desc_put(struct fwohci_softc *sc, struct fwohci_desc *fd, int ndesc)
1056 {
1057 int i, n;
1058
1059 n = fd - sc->sc_desc;
1060 for (i = 0; i < ndesc; i++, n++) {
1061 #ifdef DIAGNOSTIC
1062 if (isclr(sc->sc_descmap, n))
1063 panic("fwohci_desc_put: duplicated free");
1064 #endif
1065 clrbit(sc->sc_descmap, n);
1066 }
1067 }
1068
1069 /*
1070 * Asynchronous/Isochronous Transmit/Receive Context
1071 */
1072 static int
1073 fwohci_ctx_alloc(struct fwohci_softc *sc, struct fwohci_ctx **fcp,
1074 int bufcnt, int ctx, int ctxtype)
1075 {
1076 int i, error;
1077 struct fwohci_ctx *fc;
1078 struct fwohci_buf *fb;
1079 struct fwohci_desc *fd;
1080 #if DOUBLEBUF
1081 int buf2cnt;
1082 #endif
1083
1084 fc = malloc(sizeof(*fc), M_DEVBUF, M_WAITOK|M_ZERO);
1085 LIST_INIT(&fc->fc_handler);
1086 TAILQ_INIT(&fc->fc_buf);
1087 fc->fc_ctx = ctx;
1088 fc->fc_buffers = fb = malloc(sizeof(*fb) * bufcnt, M_DEVBUF, M_WAITOK|M_ZERO);
1089 fc->fc_bufcnt = bufcnt;
1090 #if DOUBLEBUF
1091 TAILQ_INIT(&fc->fc_buf2); /* for isochronous */
1092 if (ctxtype == FWOHCI_CTX_ISO_MULTI) {
1093 buf2cnt = bufcnt/2;
1094 bufcnt -= buf2cnt;
1095 if (buf2cnt == 0) {
1096 panic("cannot allocate iso buffer");
1097 }
1098 }
1099 #endif
1100 for (i = 0; i < bufcnt; i++, fb++) {
1101 if ((error = fwohci_buf_alloc(sc, fb)) != 0)
1102 goto fail;
1103 if ((fd = fwohci_desc_get(sc, 1)) == NULL) {
1104 error = ENOBUFS;
1105 goto fail;
1106 }
1107 fb->fb_desc = fd;
1108 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
1109 ((caddr_t)fd - (caddr_t)sc->sc_desc);
1110 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS |
1111 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH;
1112 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len;
1113 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr;
1114 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
1115 }
1116 #if DOUBLEBUF
1117 if (ctxtype == FWOHCI_CTX_ISO_MULTI) {
1118 for (i = bufcnt; i < bufcnt + buf2cnt; i++, fb++) {
1119 if ((error = fwohci_buf_alloc(sc, fb)) != 0)
1120 goto fail;
1121 if ((fd = fwohci_desc_get(sc, 1)) == NULL) {
1122 error = ENOBUFS;
1123 goto fail;
1124 }
1125 fb->fb_desc = fd;
1126 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
1127 ((caddr_t)fd - (caddr_t)sc->sc_desc);
1128 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1129 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1130 BUS_DMASYNC_PREWRITE);
1131 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS |
1132 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH;
1133 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len;
1134 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr;
1135 TAILQ_INSERT_TAIL(&fc->fc_buf2, fb, fb_list);
1136 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1137 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1138 BUS_DMASYNC_POSTWRITE);
1139 }
1140 }
1141 #endif /* DOUBLEBUF */
1142 fc->fc_type = ctxtype;
1143 *fcp = fc;
1144 return 0;
1145
1146 fail:
1147 while (i-- > 0) {
1148 fb--;
1149 if (fb->fb_desc)
1150 fwohci_desc_put(sc, fb->fb_desc, 1);
1151 fwohci_buf_free(sc, fb);
1152 }
1153 free(fc, M_DEVBUF);
1154 return error;
1155 }
1156
1157 static void
1158 fwohci_ctx_free(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1159 {
1160 struct fwohci_buf *fb;
1161 struct fwohci_handler *fh;
1162
1163 #if DOUBLEBUF
1164 if ((fc->fc_type == FWOHCI_CTX_ISO_MULTI) &&
1165 (TAILQ_FIRST(&fc->fc_buf) > TAILQ_FIRST(&fc->fc_buf2))) {
1166 struct fwohci_buf_s fctmp;
1167
1168 fctmp = fc->fc_buf;
1169 fc->fc_buf = fc->fc_buf2;
1170 fc->fc_buf2 = fctmp;
1171 }
1172 #endif
1173 while ((fh = LIST_FIRST(&fc->fc_handler)) != NULL)
1174 fwohci_handler_set(sc, fh->fh_tcode, fh->fh_key1, fh->fh_key2,
1175 fh->fh_key3, NULL, NULL);
1176 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
1177 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
1178 if (fb->fb_desc)
1179 fwohci_desc_put(sc, fb->fb_desc, 1);
1180 fwohci_buf_free(sc, fb);
1181 }
1182 #if DOUBLEBUF
1183 while ((fb = TAILQ_FIRST(&fc->fc_buf2)) != NULL) {
1184 TAILQ_REMOVE(&fc->fc_buf2, fb, fb_list);
1185 if (fb->fb_desc)
1186 fwohci_desc_put(sc, fb->fb_desc, 1);
1187 fwohci_buf_free(sc, fb);
1188 }
1189 #endif /* DOUBLEBUF */
1190 free(fc->fc_buffers, M_DEVBUF);
1191 free(fc, M_DEVBUF);
1192 }
1193
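/*
 * Reset a context's buffer ring: clear the buffer offsets, rebuild the
 * descriptor branch pointers and reload CommandPtr.  For isochronous
 * receive contexts this also programs the buffer-fill, multi-channel
 * and ContextMatch settings from the first registered handler.
 */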
1194 static void
1195 fwohci_ctx_init(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1196 {
1197 struct fwohci_buf *fb, *nfb;
1198 struct fwohci_desc *fd;
1199 struct fwohci_handler *fh;
1200 int n;
1201
1202 for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL; fb = nfb) {
1203 nfb = TAILQ_NEXT(fb, fb_list);
1204 fb->fb_off = 0;
1205 fd = fb->fb_desc;
1206 fd->fd_branch = (nfb != NULL) ? (nfb->fb_daddr | 1) : 0;
1207 fd->fd_rescount = fd->fd_reqcount;
1208 }
1209
1210 #if DOUBLEBUF
1211 for (fb = TAILQ_FIRST(&fc->fc_buf2); fb != NULL; fb = nfb) {
1212 nfb = TAILQ_NEXT(fb, fb_list);
1213 fb->fb_off = 0;
1214 fd = fb->fb_desc;
1215 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1216 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1217 BUS_DMASYNC_PREWRITE);
1218 fd->fd_branch = (nfb != NULL) ? (nfb->fb_daddr | 1) : 0;
1219 fd->fd_rescount = fd->fd_reqcount;
1220 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1221 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1222 BUS_DMASYNC_POSTWRITE);
1223 }
1224 #endif /* DOUBLEBUF */
1225
1226 n = fc->fc_ctx;
1227 fb = TAILQ_FIRST(&fc->fc_buf);
1228 if (fc->fc_type != FWOHCI_CTX_ASYNC) {
1229 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr,
1230 fb->fb_daddr | 1);
1231 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear,
1232 OHCI_CTXCTL_RX_BUFFER_FILL |
1233 OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE |
1234 OHCI_CTXCTL_RX_MULTI_CHAN_MODE |
1235 OHCI_CTXCTL_RX_DUAL_BUFFER_MODE);
1236 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet,
1237 OHCI_CTXCTL_RX_ISOCH_HEADER);
1238 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
1239 OHCI_SYNC_RX_DMA_WRITE(sc, n,
1240 OHCI_SUBREG_ContextControlSet,
1241 OHCI_CTXCTL_RX_BUFFER_FILL);
1242 }
1243 fh = LIST_FIRST(&fc->fc_handler);
1244
1245 if (fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) {
1246 OHCI_SYNC_RX_DMA_WRITE(sc, n,
1247 OHCI_SUBREG_ContextControlSet,
1248 OHCI_CTXCTL_RX_MULTI_CHAN_MODE);
1249
1250 /* Receive all the isochronous channels */
1251 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet,
1252 0xffffffff);
1253 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet,
1254 0xffffffff);
1255 DPRINTF(("%s: CTXCTL 0x%08x\n",
1256 sc->sc_sc1394.sc1394_dev.dv_xname,
1257 OHCI_SYNC_RX_DMA_READ(sc, n,
1258 OHCI_SUBREG_ContextControlSet)));
1259 }
1260 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch,
1261 (fh->fh_key2 << OHCI_CTXMATCH_TAG_BITPOS) |
1262 (fh->fh_key1 & IEEE1394_ISO_CHANNEL_MASK));
1263 } else {
1264 OHCI_ASYNC_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr,
1265 fb->fb_daddr | 1);
1266 }
1267 }
1268
1269 /*
1270 * DMA data buffer
1271 */
1272 static int
1273 fwohci_buf_alloc(struct fwohci_softc *sc, struct fwohci_buf *fb)
1274 {
1275 int error;
1276
1277 if ((error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1278 PAGE_SIZE, &fb->fb_seg, 1, &fb->fb_nseg, BUS_DMA_WAITOK)) != 0) {
1279 printf("%s: unable to allocate buffer, error = %d\n",
1280 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1281 goto fail_0;
1282 }
1283
1284 if ((error = bus_dmamem_map(sc->sc_dmat, &fb->fb_seg,
1285 fb->fb_nseg, PAGE_SIZE, &fb->fb_buf, BUS_DMA_WAITOK)) != 0) {
1286 printf("%s: unable to map buffer, error = %d\n",
1287 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1288 goto fail_1;
1289 }
1290
1291 if ((error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, fb->fb_nseg,
1292 PAGE_SIZE, 0, BUS_DMA_WAITOK, &fb->fb_dmamap)) != 0) {
1293 printf("%s: unable to create buffer DMA map, "
1294 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname,
1295 error);
1296 goto fail_2;
1297 }
1298
1299 if ((error = bus_dmamap_load(sc->sc_dmat, fb->fb_dmamap,
1300 fb->fb_buf, PAGE_SIZE, NULL, BUS_DMA_WAITOK)) != 0) {
1301 printf("%s: unable to load buffer DMA map, "
1302 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname,
1303 error);
1304 goto fail_3;
1305 }
1306
1307 return 0;
1308
1309 bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap);
1310 fail_3:
1311 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
1312 fail_2:
1313 bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE);
1314 fail_1:
1315 bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg);
1316 fail_0:
1317 return error;
1318 }
1319
1320 static void
1321 fwohci_buf_free(struct fwohci_softc *sc, struct fwohci_buf *fb)
1322 {
1323
1324 bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap);
1325 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
1326 bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE);
1327 bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg);
1328 }
1329
1330 static void
1331 fwohci_buf_init_rx(struct fwohci_softc *sc)
1332 {
1333 int i;
1334
1335 /*
1336 * Initialize for Asynchronous Receive Queue.
1337 */
1338 fwohci_ctx_init(sc, sc->sc_ctx_arrq);
1339 fwohci_ctx_init(sc, sc->sc_ctx_arrs);
1340
1341 /*
1342 * Initialize for Isochronous Receive Queue.
1343 */
1344 if (sc->sc_ctx_as != NULL) {
1345 for (i = 0; i < sc->sc_isoctx; i++) {
1346 if (sc->sc_ctx_as[i] != NULL)
1347 fwohci_ctx_init(sc, sc->sc_ctx_as[i]);
1348 }
1349 }
1350 }
1351
1352 static void
1353 fwohci_buf_start_rx(struct fwohci_softc *sc)
1354 {
1355 int i;
1356
1357 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST,
1358 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1359 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
1360 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1361 if (sc->sc_ctx_as != NULL) {
1362 for (i = 0; i < sc->sc_isoctx; i++) {
1363 if (sc->sc_ctx_as[i] != NULL)
1364 OHCI_SYNC_RX_DMA_WRITE(sc, i,
1365 OHCI_SUBREG_ContextControlSet,
1366 OHCI_CTXCTL_RUN);
1367 }
1368 }
1369 }
1370
1371 static void
1372 fwohci_buf_stop_tx(struct fwohci_softc *sc)
1373 {
1374 int i;
1375
1376 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_REQUEST,
1377 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1378 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
1379 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1380
1381 /*
1382 * Make sure the transmitter is stopped.
1383 */
1384 for (i = 0; i < OHCI_LOOP; i++) {
1385 DELAY(10);
1386 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST,
1387 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)
1388 continue;
1389 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
1390 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)
1391 continue;
1392 break;
1393 }
1394
1395 /*
1396 * Initialize for Asynchronous Transmit Queue.
1397 */
1398 fwohci_at_done(sc, sc->sc_ctx_atrq, 1);
1399 fwohci_at_done(sc, sc->sc_ctx_atrs, 1);
1400 }
1401
1402 static void
1403 fwohci_buf_stop_rx(struct fwohci_softc *sc)
1404 {
1405 int i;
1406
1407 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST,
1408 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1409 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
1410 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1411 for (i = 0; i < sc->sc_isoctx; i++) {
1412 OHCI_SYNC_RX_DMA_WRITE(sc, i,
1413 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1414 }
1415 }
1416
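/*
 * Recycle receive buffers the controller has finished with: each
 * completed buffer is reset and relinked at the tail of the DMA
 * program.  For buffer-fill (ISO_MULTI) contexts the active and spare
 * buffer queues are swapped instead.
 */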
1417 static void
1418 fwohci_buf_next(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1419 {
1420 struct fwohci_buf *fb, *tfb;
1421
1422 #if DOUBLEBUF
1423 if (fc->fc_type != FWOHCI_CTX_ISO_MULTI) {
1424 #endif
1425 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
1426 if (fc->fc_type) {
1427 if (fb->fb_off == 0)
1428 break;
1429 } else {
1430 if (fb->fb_off != fb->fb_desc->fd_reqcount ||
1431 fb->fb_desc->fd_rescount != 0)
1432 break;
1433 }
1434 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
1435 fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount;
1436 fb->fb_off = 0;
1437 fb->fb_desc->fd_branch = 0;
1438 tfb = TAILQ_LAST(&fc->fc_buf, fwohci_buf_s);
1439 tfb->fb_desc->fd_branch = fb->fb_daddr | 1;
1440 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
1441 }
1442 #if DOUBLEBUF
1443 } else {
1444 struct fwohci_buf_s fctmp;
1445
1446 /* cleaning buffer */
1447 for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL;
1448 fb = TAILQ_NEXT(fb, fb_list)) {
1449 fb->fb_off = 0;
1450 fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount;
1451 }
1452
1453 /* rotating buffer */
1454 fctmp = fc->fc_buf;
1455 fc->fc_buf = fc->fc_buf2;
1456 fc->fc_buf2 = fctmp;
1457 }
1458 #endif
1459 }
1460
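/*
 * Hand out a pointer to up to len bytes of received data at the
 * current read offset, advancing to the next buffer in the ring when
 * the current one is exhausted.  Returns the number of bytes made
 * available (the offset advances in quadlet units), or 0 when no more
 * data is ready.
 */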
1461 static int
1462 fwohci_buf_pktget(struct fwohci_softc *sc, struct fwohci_buf **fbp, caddr_t *pp,
1463 int len)
1464 {
1465 struct fwohci_buf *fb;
1466 struct fwohci_desc *fd;
1467 int bufend;
1468
1469 fb = *fbp;
1470 again:
1471 fd = fb->fb_desc;
1472 DPRINTFN(1, ("fwohci_buf_pktget: desc %ld, off %d, req %d, res %d,"
1473 " len %d, avail %d\n", (long)(fd - sc->sc_desc), fb->fb_off,
1474 fd->fd_reqcount, fd->fd_rescount, len,
1475 fd->fd_reqcount - fd->fd_rescount - fb->fb_off));
1476 bufend = fd->fd_reqcount - fd->fd_rescount;
1477 if (fb->fb_off >= bufend) {
1478 DPRINTFN(5, ("buf %x finish req %d res %d off %d ",
1479 fb->fb_desc->fd_data, fd->fd_reqcount, fd->fd_rescount,
1480 fb->fb_off));
1481 if (fd->fd_rescount == 0) {
1482 *fbp = fb = TAILQ_NEXT(fb, fb_list);
1483 if (fb != NULL)
1484 goto again;
1485 }
1486 return 0;
1487 }
1488 if (fb->fb_off + len > bufend)
1489 len = bufend - fb->fb_off;
1490 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len,
1491 BUS_DMASYNC_POSTREAD);
1492 *pp = fb->fb_buf + fb->fb_off;
1493 fb->fb_off += roundup(len, 4);
1494 return len;
1495 }
1496
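/*
 * Assemble one received packet from a buffer-fill context: parse the
 * first quadlet to determine tcode and header length, then gather the
 * header, data iovecs and trailer into *pkt.  Returns 1 if a complete
 * packet was extracted, 0 otherwise.
 */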
1497 static int
1498 fwohci_buf_input(struct fwohci_softc *sc, struct fwohci_ctx *fc,
1499 struct fwohci_pkt *pkt)
1500 {
1501 caddr_t p;
1502 struct fwohci_buf *fb;
1503 int len, count, i;
1504 #ifdef FW_DEBUG
1505 int tlabel;
1506 #endif
1507
1508 memset(pkt, 0, sizeof(*pkt));
1509 pkt->fp_uio.uio_iov = pkt->fp_iov;
1510 pkt->fp_uio.uio_rw = UIO_WRITE;
1511 pkt->fp_uio.uio_segflg = UIO_SYSSPACE;
1512
1513 /* get first quadlet */
1514 fb = TAILQ_FIRST(&fc->fc_buf);
1515 count = 4;
1516 len = fwohci_buf_pktget(sc, &fb, &p, count);
1517 if (len <= 0) {
1518 DPRINTFN(1, ("fwohci_buf_input: no input for %d\n",
1519 fc->fc_ctx));
1520 return 0;
1521 }
1522 pkt->fp_hdr[0] = *(u_int32_t *)p;
1523 pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4;
1524 switch (pkt->fp_tcode) {
1525 case IEEE1394_TCODE_WRITE_REQ_QUAD:
1526 case IEEE1394_TCODE_READ_RESP_QUAD:
1527 pkt->fp_hlen = 12;
1528 pkt->fp_dlen = 4;
1529 break;
1530 case IEEE1394_TCODE_READ_REQ_BLOCK:
1531 pkt->fp_hlen = 16;
1532 pkt->fp_dlen = 0;
1533 break;
1534 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
1535 case IEEE1394_TCODE_READ_RESP_BLOCK:
1536 case IEEE1394_TCODE_LOCK_REQ:
1537 case IEEE1394_TCODE_LOCK_RESP:
1538 pkt->fp_hlen = 16;
1539 break;
1540 case IEEE1394_TCODE_STREAM_DATA:
1541 #ifdef DIAGNOSTIC
1542 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI)
1543 #endif
1544 {
1545 pkt->fp_hlen = 4;
1546 pkt->fp_dlen = pkt->fp_hdr[0] >> 16;
1547 DPRINTFN(5, ("[%d]", pkt->fp_dlen));
1548 break;
1549 }
1550 #ifdef DIAGNOSTIC
1551 else {
1552 printf("fwohci_buf_input: bad tcode: STREAM_DATA\n");
1553 return 0;
1554 }
1555 #endif
1556 default:
1557 pkt->fp_hlen = 12;
1558 pkt->fp_dlen = 0;
1559 break;
1560 }
1561
1562 /* get header */
1563 while (count < pkt->fp_hlen) {
1564 len = fwohci_buf_pktget(sc, &fb, &p, pkt->fp_hlen - count);
1565 if (len == 0) {
1566 printf("fwohci_buf_input: malformed input 1: %d\n",
1567 pkt->fp_hlen - count);
1568 return 0;
1569 }
1570 memcpy((caddr_t)pkt->fp_hdr + count, p, len);
1571 count += len;
1572 }
1573 if (pkt->fp_hlen == 16 &&
1574 pkt->fp_tcode != IEEE1394_TCODE_READ_REQ_BLOCK)
1575 pkt->fp_dlen = pkt->fp_hdr[3] >> 16;
1576 #ifdef FW_DEBUG
1577 tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10;
1578 #endif
1579 DPRINTFN(1, ("fwohci_buf_input: tcode=0x%x, tlabel=0x%x, hlen=%d, "
1580 "dlen=%d\n", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen));
1581
1582 /* get data */
1583 count = 0;
1584 i = 0;
1585 while (count < pkt->fp_dlen) {
1586 len = fwohci_buf_pktget(sc, &fb,
1587 (caddr_t *)&pkt->fp_iov[i].iov_base,
1588 pkt->fp_dlen - count);
1589 if (len == 0) {
1590 printf("fwohci_buf_input: malformed input 2: %d\n",
1591 pkt->fp_dlen - count);
1592 return 0;
1593 }
1594 pkt->fp_iov[i++].iov_len = len;
1595 count += len;
1596 }
1597 pkt->fp_uio.uio_iovcnt = i;
1598 pkt->fp_uio.uio_resid = count;
1599
1600 /* get trailer */
1601 len = fwohci_buf_pktget(sc, &fb, (caddr_t *)&pkt->fp_trail,
1602 sizeof(*pkt->fp_trail));
1603 if (len <= 0) {
1604 printf("fwohci_buf_input: malformed input 3: %d\n",
1605 pkt->fp_hlen - count);
1606 return 0;
1607 }
1608 return 1;
1609 }
1610
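/*
 * Packet-per-buffer variant of fwohci_buf_input() used for isochronous
 * receive: each buffer holds a single stream packet, with the trailer
 * and isochronous header stored in its first two quadlets.  Falls back
 * to fwohci_buf_input() for ISO_MULTI (buffer-fill) contexts.
 */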
1611 static int
1612 fwohci_buf_input_ppb(struct fwohci_softc *sc, struct fwohci_ctx *fc,
1613 struct fwohci_pkt *pkt)
1614 {
1615 caddr_t p;
1616 int len;
1617 struct fwohci_buf *fb;
1618 struct fwohci_desc *fd;
1619
1620 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
1621 return fwohci_buf_input(sc, fc, pkt);
1622 }
1623
1624 memset(pkt, 0, sizeof(*pkt));
1625 pkt->fp_uio.uio_iov = pkt->fp_iov;
1626 pkt->fp_uio.uio_rw = UIO_WRITE;
1627 pkt->fp_uio.uio_segflg = UIO_SYSSPACE;
1628
1629 for (fb = TAILQ_FIRST(&fc->fc_buf); ; fb = TAILQ_NEXT(fb, fb_list)) {
1630 if (fb == NULL)
1631 return 0;
1632 if (fb->fb_off == 0)
1633 break;
1634 }
1635 fd = fb->fb_desc;
1636 len = fd->fd_reqcount - fd->fd_rescount;
1637 if (len == 0)
1638 return 0;
1639 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len,
1640 BUS_DMASYNC_POSTREAD);
1641
1642 p = fb->fb_buf;
1643 fb->fb_off += roundup(len, 4);
1644 if (len < 8) {
1645 printf("fwohci_buf_input_ppb: malformed input 1: %d\n", len);
1646 return 0;
1647 }
1648
1649 /*
1650 * Get the trailer first; it may contain bogus data unless the
1651 * status-update flag is set in the descriptor.
1652 */
1653 pkt->fp_trail = (u_int32_t *)p;
1654 *pkt->fp_trail = (*pkt->fp_trail & 0xffff) | (fd->fd_status << 16);
1655 pkt->fp_hdr[0] = ((u_int32_t *)p)[1];
1656 pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4;
1657 #ifdef DIAGNOSTIC
1658 if (pkt->fp_tcode != IEEE1394_TCODE_STREAM_DATA) {
1659 printf("fwohci_buf_input_ppb: bad tcode: 0x%x\n",
1660 pkt->fp_tcode);
1661 return 0;
1662 }
1663 #endif
1664 pkt->fp_hlen = 4;
1665 pkt->fp_dlen = pkt->fp_hdr[0] >> 16;
1666 p += 8;
1667 len -= 8;
1668 if (pkt->fp_dlen != len) {
1669 printf("fwohci_buf_input_ppb: malformed input 2: %d != %d\n",
1670 pkt->fp_dlen, len);
1671 return 0;
1672 }
1673 DPRINTFN(1, ("fwohci_buf_input_ppb: tcode=0x%x, hlen=%d, dlen=%d\n",
1674 pkt->fp_tcode, pkt->fp_hlen, pkt->fp_dlen));
1675 pkt->fp_iov[0].iov_base = p;
1676 pkt->fp_iov[0].iov_len = len;
1677 pkt->fp_uio.uio_iovcnt = 0;
1678 pkt->fp_uio.uio_resid = len;
1679 return 1;
1680 }
1681
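/*
 * Register or remove a packet handler.  For asynchronous tcodes, key1
 * and key2 form the 48-bit target address and key3 its length; for
 * stream data they select the isochronous channel and tag, and a
 * receive context is allocated or freed as needed.  Passing a NULL
 * handler removes an existing registration.
 */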
1682 static int
1683 fwohci_handler_set(struct fwohci_softc *sc,
1684 int tcode, u_int32_t key1, u_int32_t key2, u_int32_t key3,
1685 int (*handler)(struct fwohci_softc *, void *, struct fwohci_pkt *),
1686 void *arg)
1687 {
1688 struct fwohci_ctx *fc;
1689 struct fwohci_handler *fh;
1690 u_int64_t addr, naddr;
1691 u_int32_t off;
1692 int i, j;
1693
1694 if (tcode == IEEE1394_TCODE_STREAM_DATA &&
1695 (((key1 & OHCI_ASYNC_STREAM) && sc->sc_ctx_as != NULL)
1696 || (key1 & OHCI_ASYNC_STREAM) == 0)) {
1697 int isasync = key1 & OHCI_ASYNC_STREAM;
1698
1699 key1 = key1 & IEEE1394_ISO_CHANNEL_ANY ?
1700 IEEE1394_ISO_CHANNEL_ANY : (key1 & IEEE1394_ISOCH_MASK);
1701 if (key1 & IEEE1394_ISO_CHANNEL_ANY) {
1702 printf("%s: key changed to %x\n",
1703 sc->sc_sc1394.sc1394_dev.dv_xname, key1);
1704 }
1705 j = sc->sc_isoctx;
1706 fh = NULL;
1707
1708 for (i = 0; i < sc->sc_isoctx; i++) {
1709 if ((fc = sc->sc_ctx_as[i]) == NULL) {
1710 if (j == sc->sc_isoctx)
1711 j = i;
1712 continue;
1713 }
1714 fh = LIST_FIRST(&fc->fc_handler);
1715 if (fh->fh_tcode == tcode &&
1716 fh->fh_key1 == key1 && fh->fh_key2 == key2)
1717 break;
1718 fh = NULL;
1719 }
1720 if (fh == NULL) {
1721 if (handler == NULL)
1722 return 0;
1723 if (j == sc->sc_isoctx) {
1724 DPRINTF(("fwohci_handler_set: no more free "
1725 "context\n"));
1726 return ENOMEM;
1727 }
1728 if ((fc = sc->sc_ctx_as[j]) == NULL) {
1729 fwohci_ctx_alloc(sc, &fc, OHCI_BUF_IR_CNT, j,
1730 isasync ? FWOHCI_CTX_ISO_SINGLE :
1731 FWOHCI_CTX_ISO_MULTI);
1732 sc->sc_ctx_as[j] = fc;
1733 }
1734 }
1735 #ifdef FW_DEBUG
1736 if (fh == NULL && handler != NULL) {
1737 printf("use ir context %d\n", j);
1738 } else if (fh != NULL && handler == NULL) {
1739 printf("remove ir context %d\n", i);
1740 }
1741 #endif
1742 } else {
1743 switch (tcode) {
1744 case IEEE1394_TCODE_WRITE_REQ_QUAD:
1745 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
1746 case IEEE1394_TCODE_READ_REQ_QUAD:
1747 case IEEE1394_TCODE_READ_REQ_BLOCK:
1748 case IEEE1394_TCODE_LOCK_REQ:
1749 fc = sc->sc_ctx_arrq;
1750 break;
1751 case IEEE1394_TCODE_WRITE_RESP:
1752 case IEEE1394_TCODE_READ_RESP_QUAD:
1753 case IEEE1394_TCODE_READ_RESP_BLOCK:
1754 case IEEE1394_TCODE_LOCK_RESP:
1755 fc = sc->sc_ctx_arrs;
1756 break;
1757 default:
1758 return EIO;
1759 }
1760 naddr = ((u_int64_t)key1 << 32) + key2;
1761
1762 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
1763 fh = LIST_NEXT(fh, fh_list)) {
1764 if (fh->fh_tcode == tcode) {
1765 if (fh->fh_key1 == key1 &&
1766 fh->fh_key2 == key2 && fh->fh_key3 == key3)
1767 break;
1768 /* Make sure it's not within a current range. */
1769 addr = ((u_int64_t)fh->fh_key1 << 32) +
1770 fh->fh_key2;
1771 off = fh->fh_key3;
1772 if (key3 &&
1773 (((naddr >= addr) &&
1774 (naddr < (addr + off))) ||
1775 (((naddr + key3) > addr) &&
1776 ((naddr + key3) <= (addr + off))) ||
1777 ((addr > naddr) &&
1778 (addr < (naddr + key3)))))
1779 if (handler)
1780 return EEXIST;
1781 }
1782 }
1783 }
1784 if (handler == NULL) {
1785 if (fh != NULL) {
1786 LIST_REMOVE(fh, fh_list);
1787 free(fh, M_DEVBUF);
1788 }
1789 if (tcode == IEEE1394_TCODE_STREAM_DATA) {
1790 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
1791 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1792 sc->sc_ctx_as[fc->fc_ctx] = NULL;
1793 fwohci_ctx_free(sc, fc);
1794 }
1795 return 0;
1796 }
1797 if (fh == NULL) {
1798 fh = malloc(sizeof(*fh), M_DEVBUF, M_WAITOK);
1799 LIST_INSERT_HEAD(&fc->fc_handler, fh, fh_list);
1800 }
1801 fh->fh_tcode = tcode;
1802 fh->fh_key1 = key1;
1803 fh->fh_key2 = key2;
1804 fh->fh_key3 = key3;
1805 fh->fh_handler = handler;
1806 fh->fh_handarg = arg;
1807 DPRINTFN(1, ("fwohci_handler_set: ctx %d, tcode %x, key 0x%x, 0x%x, "
1808 "0x%x\n", fc->fc_ctx, tcode, key1, key2, key3));
1809
1810 if (tcode == IEEE1394_TCODE_STREAM_DATA) {
1811 fwohci_ctx_init(sc, fc);
1812 DPRINTFN(1, ("fwohci_handler_set: SYNC desc %ld\n",
1813 (long)(TAILQ_FIRST(&fc->fc_buf)->fb_desc - sc->sc_desc)));
1814 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
1815 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1816 }
1817 return 0;
1818 }
1819
1820 /*
1821 * ieee1394_ir_tag_t
1822 * fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
1823 * int bufnum, int maxsize, int flags)
1824 *
1825 * On success this function returns a pointer to the isochronous
1826 * receive context (as an opaque tag); on failure it returns
1827 * NULL.
1829 */
1830 ieee1394_ir_tag_t
1831 fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
1832 int bufnum, int maxsize, int flags)
1833 {
1834 int i, openctx;
1835 struct fwohci_ir_ctx *irc;
1836 struct fwohci_softc *sc = (struct fwohci_softc *)dev;
1837 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
1838
1839 printf("%s: ir_ctx_set channel %d tagbm 0x%x maxsize %d bufnum %d\n",
1840 xname, channel, tagbm, maxsize, bufnum);
1841 	/*
1842 	 * This loop finds the smallest vacant context and checks
1843 	 * whether another context already uses the requested channel.
1844 	 */
1845 openctx = sc->sc_isoctx;
1846 for (i = 0; i < sc->sc_isoctx; ++i) {
1847 if (sc->sc_ctx_ir[i] == NULL) {
1848 			/*
1849 			 * Found a vacant context.  Remember the
1850 			 * first (smallest-numbered) one we see.
1851 			 */
1852 if (openctx == sc->sc_isoctx) {
1853 openctx = i;
1854 }
1855 } else {
1856 			/*
1857 			 * This context is in use.  Check whether it
1858 			 * uses the same channel as ours.
1859 			 */
1860 if (sc->sc_ctx_ir[i]->irc_channel == channel) {
1861 /* Using same channel. */
1862 printf("%s: channel %d occupied by ctx%d\n",
1863 xname, channel, i);
1864 return NULL;
1865 }
1866 }
1867 }
1868
1869 	/*
1870 	 * If there is a vacant context, allocate an isochronous
1871 	 * receive context for it.
1872 	 */
1873 if (openctx != sc->sc_isoctx) {
1874 		printf("%s: using ctx %d for iso receive\n", xname, openctx);
1875 if ((irc = fwohci_ir_ctx_construct(sc, openctx, channel,
1876 tagbm, bufnum, maxsize, flags)) == NULL) {
1877 return NULL;
1878 }
1879 #ifndef IR_CTX_OPENTEST
1880 sc->sc_ctx_ir[openctx] = irc;
1881 #else
1882 fwohci_ir_ctx_destruct(irc);
1883 irc = NULL;
1884 #endif
1885 } else {
1886 printf("%s: cannot find any vacant contexts\n", xname);
1887 irc = NULL;
1888 }
1889
1890 return (ieee1394_ir_tag_t)irc;
1891 }
1892
1893
1894 /*
1895  * int fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t ir)
1896  *
1897  *	This function returns 0 if it succeeds.  Otherwise it returns
1898  *	-1 or EBUSY.
1899  */
1900 int
1901 fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t ir)
1902 {
1903 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)ir;
1904 struct fwohci_softc *sc = irc->irc_sc;
1905 int i;
1906
1907 if (sc->sc_ctx_ir[irc->irc_num] != irc) {
1908 printf("fwohci_ir_ctx_clear: irc differs %p %p\n",
1909 sc->sc_ctx_ir[irc->irc_num], irc);
1910 return -1;
1911 }
1912
1913 i = 0;
1914 while (irc->irc_status & IRC_STATUS_RUN) {
1915 tsleep((void *)irc, PWAIT|PCATCH, "IEEE1394 iso receive", 100);
1916 if (irc->irc_status & IRC_STATUS_RUN) {
1917 if (fwohci_ir_stop(irc) == 0) {
1918 irc->irc_status &= ~IRC_STATUS_RUN;
1919 }
1920
1921 }
1922 if (++i > 20) {
1923 u_int32_t reg
1924 = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1925 OHCI_SUBREG_ContextControlSet);
1926
1927 			printf("fwohci_ir_ctx_clear: "
1928 "Cannot stop iso receive engine\n");
1929 printf("%s: intr IR_CommandPtr 0x%08x "
1930 "ContextCtrl 0x%08x%s%s%s%s\n",
1931 sc->sc_sc1394.sc1394_dev.dv_xname,
1932 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1933 OHCI_SUBREG_CommandPtr),
1934 reg,
1935 reg & OHCI_CTXCTL_RUN ? " run" : "",
1936 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
1937 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
1938 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
1939
1940 return EBUSY;
1941 }
1942 }
1943
1944 	printf("fwohci_ir_ctx_clear: DMA engine stopped; got %d frames, max queuelen %d pos %d\n",
1945 	    irc->irc_pktcount, irc->irc_maxqueuelen, irc->irc_maxqueuepos);
1946
1947 fwohci_ir_ctx_destruct(irc);
1948
1949 sc->sc_ctx_ir[irc->irc_num] = NULL;
1950
1951 return 0;
1952 }
1953
1954
1955
1956
1957
1958
1959
1960
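/*
 * fwohci_it_set:
 *	Entry point used by the ieee1394 layer: converts the tag bitmap
 *	into a single tag and sets up an isochronous transmit context
 *	with a maxsize of 488 bytes.
 */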
1961 ieee1394_it_tag_t
1962 fwohci_it_set(struct ieee1394_softc *isc, int channel, int tagbm)
1963 {
1964 ieee1394_it_tag_t rv;
1965 int tag;
1966
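	/* Pick the lowest tag set in the tag bitmap. */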
1967 for (tag = 0; tagbm != 0 && (tagbm & 0x01) == 0; tagbm >>= 1, ++tag);
1968
1969 rv = fwohci_it_ctx_set((struct fwohci_softc *)isc, channel, tag, 488);
1970
1971 return rv;
1972 }
1973
1974 /*
1975  * static ieee1394_it_tag_t
1976  * fwohci_it_ctx_set(struct fwohci_softc *sc,
1977  *	int channel, int tag, int maxsize)
1978  *
1979  *	This function returns a non-NULL value if it succeeds.  The
1980  *	return value is a pointer to the isochronous transmit
1981  *	context, cast to ieee1394_it_tag_t.  This function returns
1982  *	NULL if it fails.
1983  */
1984 static ieee1394_it_tag_t
1985 fwohci_it_ctx_set(struct fwohci_softc *sc, int channel, int tag, int maxsize)
1986 {
1987 int i, openctx;
1988 struct fwohci_it_ctx *itc;
1989 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
1990 #ifdef TEST_CHAIN
1991 extern int fwohci_test_chain(struct fwohci_it_ctx *);
1992 #endif /* TEST_CHAIN */
1993 #ifdef TEST_WRITE
1994 extern void fwohci_test_write(struct fwohci_it_ctx *itc);
1995 #endif /* TEST_WRITE */
1996
1997 printf("%s: it_ctx_set channel %d tag %d maxsize %d\n",
1998 xname, channel, tag, maxsize);
1999
2000 	/*
2001 	 * This loop finds the smallest vacant context and checks
2002 	 * whether another context already uses the requested channel.
2003 	 */
2004 openctx = sc->sc_itctx;
2005 for (i = 0; i < sc->sc_itctx; ++i) {
2006 if (sc->sc_ctx_it[i] == NULL) {
2007 			/*
2008 			 * Found a vacant context.  Remember the
2009 			 * first (smallest-numbered) one we see.
2010 			 */
2011 if (openctx == sc->sc_itctx) {
2012 openctx = i;
2013 }
2014 } else {
2015 			/*
2016 			 * This context is in use.  Check whether it
2017 			 * uses the same channel as ours.
2018 			 */
2019 if (sc->sc_ctx_it[i]->itc_channel == channel) {
2020 /* Using same channel. */
2021 printf("%s: channel %d occupied by ctx%d\n",
2022 xname, channel, i);
2023 return NULL;
2024 }
2025 }
2026 }
2027
2028 /*
2029 * If there is a vacant context, allocate isochronous transmit
2030 * context for it.
2031 */
2032 if (openctx != sc->sc_itctx) {
2033 		printf("%s: using ctx %d for iso transmit\n", xname, openctx);
2034 if ((itc = fwohci_it_ctx_construct(sc, openctx, channel,
2035 tag, maxsize)) == NULL) {
2036 return NULL;
2037 }
2038 sc->sc_ctx_it[openctx] = itc;
2039
2040 #ifdef TEST_CHAIN
2041 fwohci_test_chain(itc);
2042 #endif /* TEST_CHAIN */
2043 #ifdef TEST_WRITE
2044 fwohci_test_write(itc);
2045 itc = NULL;
2046 #endif /* TEST_WRITE */
2047
2048 } else {
2049 printf("%s: cannot find any vacant contexts\n", xname);
2050 itc = NULL;
2051 }
2052
2053 return (ieee1394_it_tag_t)itc;
2054 }
2055
2056
2057 /*
2058  * int fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2059  *
2060  *	This function returns 0 if it succeeds.  Otherwise it returns
2061  *	-1 or EBUSY.
2062  */
2063 int
2064 fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2065 {
2066 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
2067 struct fwohci_softc *sc = itc->itc_sc;
2068 int i;
2069
2070 if (sc->sc_ctx_it[itc->itc_num] != itc) {
2071 printf("fwohci_it_ctx_clear: itc differs %p %p\n",
2072 sc->sc_ctx_it[itc->itc_num], itc);
2073 return -1;
2074 }
2075
2076 fwohci_it_ctx_flush(it);
2077
2078 i = 0;
2079 while (itc->itc_flags & ITC_FLAGS_RUN) {
2080 tsleep((void *)itc, PWAIT|PCATCH, "IEEE1394 iso transmit", 100);
2081 if (itc->itc_flags & ITC_FLAGS_RUN) {
2082 u_int32_t reg;
2083
2084 reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2085 OHCI_SUBREG_ContextControlSet);
2086
2087 if ((reg & OHCI_CTXCTL_WAKE) == 0) {
2088 itc->itc_flags &= ~ITC_FLAGS_RUN;
2089 				printf("fwohci_it_ctx_clear: "
2090 "DMA engine stopped without intr\n");
2091 }
2092 printf("%s: %d intr IT_CommandPtr 0x%08x "
2093 "ContextCtrl 0x%08x%s%s%s%s\n",
2094 sc->sc_sc1394.sc1394_dev.dv_xname, i,
2095 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2096 OHCI_SUBREG_CommandPtr),
2097 reg,
2098 reg & OHCI_CTXCTL_RUN ? " run" : "",
2099 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
2100 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
2101 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
2102
2103
2104 }
2105 if (++i > 20) {
2106 u_int32_t reg
2107 = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2108 OHCI_SUBREG_ContextControlSet);
2109
2110 			printf("fwohci_it_ctx_clear: "
2111 "Cannot stop iso transmit engine\n");
2112 printf("%s: intr IT_CommandPtr 0x%08x "
2113 "ContextCtrl 0x%08x%s%s%s%s\n",
2114 sc->sc_sc1394.sc1394_dev.dv_xname,
2115 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2116 OHCI_SUBREG_CommandPtr),
2117 reg,
2118 reg & OHCI_CTXCTL_RUN ? " run" : "",
2119 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
2120 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
2121 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
2122
2123 return EBUSY;
2124 }
2125 }
2126
2127 printf("fwohci_it_ctx_clear: DMA engine is stopped.\n");
2128
2129 fwohci_it_ctx_destruct(itc);
2130
2131 sc->sc_ctx_it[itc->itc_num] = NULL;
2132
2133
2134 return 0;
2135 }
2136
2137
2138
2139
2140
2141
2142 /*
2143  * Asynchronous Receive Requests input frontend.
2144 */
2145 static void
2146 fwohci_arrq_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2147 {
2148 int rcode;
2149 u_int16_t len;
2150 u_int32_t key1, key2, off;
2151 u_int64_t addr, naddr;
2152 struct fwohci_handler *fh;
2153 struct fwohci_pkt pkt, res;
2154
2155 	/*
2156 	 * Keep looping while the next packet is already in the buffer;
2157 	 * otherwise it could not be handled until the next receive interrupt.
2158 	 */
2159 while (fwohci_buf_input(sc, fc, &pkt)) {
2160 if (pkt.fp_tcode == OHCI_TCODE_PHY) {
2161 fwohci_phy_input(sc, &pkt);
2162 continue;
2163 }
2164 key1 = pkt.fp_hdr[1] & 0xffff;
2165 key2 = pkt.fp_hdr[2];
2166 if ((pkt.fp_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) ||
2167 (pkt.fp_tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) {
2168 len = (pkt.fp_hdr[3] & 0xffff0000) >> 16;
2169 naddr = ((u_int64_t)key1 << 32) + key2;
2170 } else
2171 len = 0;
2172 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2173 fh = LIST_NEXT(fh, fh_list)) {
2174 if (pkt.fp_tcode == fh->fh_tcode) {
2175 /* Assume length check happens in handler */
2176 if (key1 == fh->fh_key1 &&
2177 key2 == fh->fh_key2) {
2178 rcode = (*fh->fh_handler)(sc,
2179 fh->fh_handarg, &pkt);
2180 break;
2181 }
2182 addr = ((u_int64_t)fh->fh_key1 << 32) +
2183 fh->fh_key2;
2184 off = fh->fh_key3;
2185 /* Check for a range qualifier */
2186 if (len &&
2187 ((naddr >= addr) && (naddr < (addr + off))
2188 && (naddr + len <= (addr + off)))) {
2189 rcode = (*fh->fh_handler)(sc,
2190 fh->fh_handarg, &pkt);
2191 break;
2192 }
2193 }
2194 }
2195 if (fh == NULL) {
2196 rcode = IEEE1394_RCODE_ADDRESS_ERROR;
2197 DPRINTFN(1, ("fwohci_arrq_input: no listener: tcode "
2198 "0x%x, addr=0x%04x %08x\n", pkt.fp_tcode, key1,
2199 key2));
2200 }
2201 if (((*pkt.fp_trail & 0x001f0000) >> 16) !=
2202 OHCI_CTXCTL_EVENT_ACK_PENDING)
2203 continue;
2204 if (rcode != -1) {
2205 memset(&res, 0, sizeof(res));
2206 res.fp_uio.uio_rw = UIO_WRITE;
2207 res.fp_uio.uio_segflg = UIO_SYSSPACE;
2208 fwohci_atrs_output(sc, rcode, &pkt, &res);
2209 }
2210 }
2211 fwohci_buf_next(sc, fc);
2212 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2213 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2214 }
2215
2216
2217 /*
2218 * Asynchronous Receive Response input frontend.
2219 */
2220 static void
2221 fwohci_arrs_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2222 {
2223 struct fwohci_pkt pkt;
2224 struct fwohci_handler *fh;
2225 u_int16_t srcid;
2226 int rcode, tlabel;
2227
2228 while (fwohci_buf_input(sc, fc, &pkt)) {
2229 srcid = pkt.fp_hdr[1] >> 16;
2230 rcode = (pkt.fp_hdr[1] & 0x0000f000) >> 12;
2231 tlabel = (pkt.fp_hdr[0] & 0x0000fc00) >> 10;
2232 DPRINTFN(1, ("fwohci_arrs_input: tcode 0x%x, from 0x%04x,"
2233 " tlabel 0x%x, rcode 0x%x, hlen %d, dlen %d\n",
2234 pkt.fp_tcode, srcid, tlabel, rcode, pkt.fp_hlen,
2235 pkt.fp_dlen));
2236 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2237 fh = LIST_NEXT(fh, fh_list)) {
2238 if (pkt.fp_tcode == fh->fh_tcode &&
2239 (srcid & OHCI_NodeId_NodeNumber) == fh->fh_key1 &&
2240 tlabel == fh->fh_key2) {
2241 (*fh->fh_handler)(sc, fh->fh_handarg, &pkt);
2242 LIST_REMOVE(fh, fh_list);
2243 free(fh, M_DEVBUF);
2244 break;
2245 }
2246 }
2247 if (fh == NULL)
2248 			DPRINTFN(1, ("fwohci_arrs_input: no listener\n"));
2249 }
2250 fwohci_buf_next(sc, fc);
2251 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2252 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2253 }
2254
2255 /*
2256  * Isochronous Receive / Asynchronous Stream input frontend.
2257 */
2258 static void
2259 fwohci_as_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2260 {
2261 int rcode, chan, tag;
2262 struct iovec *iov;
2263 struct fwohci_handler *fh;
2264 struct fwohci_pkt pkt;
2265
2266 #if DOUBLEBUF
2267 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
2268 struct fwohci_buf *fb;
2269 int i;
2270 u_int32_t reg;
2271
2272 		/* stop the DMA engine before reading the buffer */
2273 reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx,
2274 OHCI_SUBREG_ContextControlClear);
2275 DPRINTFN(5, ("ir_input %08x =>", reg));
2276 if (reg & OHCI_CTXCTL_RUN) {
2277 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2278 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2279 }
2280 DPRINTFN(5, (" %08x\n", OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlClear)));
2281
2282 i = 0;
2283 while ((reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlSet)) & OHCI_CTXCTL_ACTIVE) {
2284 delay(10);
2285 if (++i > 10000) {
2286 printf("cannot stop dma engine 0x%08x\n", reg);
2287 return;
2288 }
2289 }
2290
2291 /* rotate dma buffer */
2292 fb = TAILQ_FIRST(&fc->fc_buf2);
2293 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx, OHCI_SUBREG_CommandPtr,
2294 fb->fb_daddr | 1);
2295 /* start dma engine */
2296 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2297 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
2298 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear,
2299 (1 << fc->fc_ctx));
2300 }
2301 #endif
2302
2303 while (fwohci_buf_input_ppb(sc, fc, &pkt)) {
2304 chan = (pkt.fp_hdr[0] & 0x00003f00) >> 8;
2305 tag = (pkt.fp_hdr[0] & 0x0000c000) >> 14;
2306 DPRINTFN(1, ("fwohci_as_input: hdr 0x%08x, tcode 0x%0x, hlen %d"
2307 ", dlen %d\n", pkt.fp_hdr[0], pkt.fp_tcode, pkt.fp_hlen,
2308 pkt.fp_dlen));
2309 if (tag == IEEE1394_TAG_GASP &&
2310 fc->fc_type == FWOHCI_CTX_ISO_SINGLE) {
2311 			/*
2312 			 * A packet with tag=3 is in GASP format.
2313 			 * Move the GASP header into the header part.
2314 			 */
2315 if (pkt.fp_dlen < 8)
2316 continue;
2317 iov = pkt.fp_iov;
2318 /* assuming pkt per buffer mode */
2319 pkt.fp_hdr[1] = ntohl(((u_int32_t *)iov->iov_base)[0]);
2320 pkt.fp_hdr[2] = ntohl(((u_int32_t *)iov->iov_base)[1]);
2321 iov->iov_base = (caddr_t)iov->iov_base + 8;
2322 iov->iov_len -= 8;
2323 pkt.fp_hlen += 8;
2324 pkt.fp_dlen -= 8;
2325 }
2326 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2327 fh = LIST_NEXT(fh, fh_list)) {
2328 if (pkt.fp_tcode == fh->fh_tcode &&
2329 (chan == fh->fh_key1 ||
2330 fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) &&
2331 ((1 << tag) & fh->fh_key2) != 0) {
2332 rcode = (*fh->fh_handler)(sc, fh->fh_handarg,
2333 &pkt);
2334 break;
2335 }
2336 }
2337 #ifdef FW_DEBUG
2338 if (fh == NULL) {
2339 DPRINTFN(1, ("fwohci_as_input: no handler\n"));
2340 } else {
2341 DPRINTFN(1, ("fwohci_as_input: rcode %d\n", rcode));
2342 }
2343 #endif
2344 }
2345 fwohci_buf_next(sc, fc);
2346
2347 if (fc->fc_type == FWOHCI_CTX_ISO_SINGLE) {
2348 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2349 OHCI_SUBREG_ContextControlSet,
2350 OHCI_CTXCTL_WAKE);
2351 }
2352 }
2353
2354 /*
2355 * Asynchronous Transmit common routine.
2356 */
2357 static int
2358 fwohci_at_output(struct fwohci_softc *sc, struct fwohci_ctx *fc,
2359 struct fwohci_pkt *pkt)
2360 {
2361 struct fwohci_buf *fb;
2362 struct fwohci_desc *fd;
2363 struct mbuf *m, *m0;
2364 int i, ndesc, error, off, len;
2365 u_int32_t val;
2366 #ifdef FW_DEBUG
2367 struct iovec *iov;
2368 int tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10;
2369 #endif
2370
2371 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == IEEE1394_BCAST_PHY_ID)
2372 		/* We can't send anything while the self-ID phase is in progress. */
2373 return EAGAIN;
2374
2375 #ifdef FW_DEBUG
2376 DPRINTFN(1, ("fwohci_at_output: tcode 0x%x, tlabel 0x%x hlen %d, "
2377 "dlen %d", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen));
2378 for (i = 0; i < pkt->fp_hlen/4; i++)
2379 DPRINTFN(2, ("%s%08x", i?" ":"\n ", pkt->fp_hdr[i]));
2380 DPRINTFN(2, ("$"));
2381 for (ndesc = 0, iov = pkt->fp_iov;
2382 ndesc < pkt->fp_uio.uio_iovcnt; ndesc++, iov++) {
2383 for (i = 0; i < iov->iov_len; i++)
2384 DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ",
2385 ((u_int8_t *)iov->iov_base)[i]));
2386 DPRINTFN(2, ("$"));
2387 }
2388 DPRINTFN(1, ("\n"));
2389 #endif
2390
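	/*
	 * Count the DMA descriptors needed: two for the immediate header
	 * block plus one per mbuf (or iovec).  If the mbuf chain would
	 * need more than OHCI_DESC_MAX descriptors, re-pack the data
	 * into cluster mbufs first.
	 */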
2391 if ((m = pkt->fp_m) != NULL) {
2392 for (ndesc = 2; m != NULL; m = m->m_next)
2393 ndesc++;
2394 if (ndesc > OHCI_DESC_MAX) {
2395 m0 = NULL;
2396 ndesc = 2;
2397 for (off = 0; off < pkt->fp_dlen; off += len) {
2398 if (m0 == NULL) {
2399 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2400 if (m0 != NULL)
2401 M_COPY_PKTHDR(m0, pkt->fp_m);
2402 m = m0;
2403 } else {
2404 MGET(m->m_next, M_DONTWAIT, MT_DATA);
2405 m = m->m_next;
2406 }
2407 if (m != NULL)
2408 MCLGET(m, M_DONTWAIT);
2409 if (m == NULL || (m->m_flags & M_EXT) == 0) {
2410 m_freem(m0);
2411 return ENOMEM;
2412 }
2413 len = pkt->fp_dlen - off;
2414 if (len > m->m_ext.ext_size)
2415 len = m->m_ext.ext_size;
2416 m_copydata(pkt->fp_m, off, len,
2417 mtod(m, caddr_t));
2418 m->m_len = len;
2419 ndesc++;
2420 }
2421 m_freem(pkt->fp_m);
2422 pkt->fp_m = m0;
2423 }
2424 } else
2425 ndesc = 2 + pkt->fp_uio.uio_iovcnt;
2426
2427 if (ndesc > OHCI_DESC_MAX)
2428 return ENOBUFS;
2429
2430 if (fc->fc_bufcnt > 50) /*XXX*/
2431 return ENOBUFS;
2432 fb = malloc(sizeof(*fb), M_DEVBUF, M_WAITOK);
2433 if (ndesc > 2) {
2434 if ((error = bus_dmamap_create(sc->sc_dmat, pkt->fp_dlen,
2435 OHCI_DESC_MAX - 2, pkt->fp_dlen, 0, BUS_DMA_WAITOK,
2436 &fb->fb_dmamap)) != 0) {
2437 			/* fb->fb_desc has not been allocated yet */
2438 free(fb, M_DEVBUF);
2439 return error;
2440 }
2441
2442 if (pkt->fp_m != NULL)
2443 error = bus_dmamap_load_mbuf(sc->sc_dmat, fb->fb_dmamap,
2444 pkt->fp_m, BUS_DMA_WAITOK);
2445 else
2446 error = bus_dmamap_load_uio(sc->sc_dmat, fb->fb_dmamap,
2447 &pkt->fp_uio, BUS_DMA_WAITOK);
2448 if (error != 0) {
2449 DPRINTFN(1, ("Can't load DMA map: %d\n", error));
2450 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
2451 			/* fb->fb_desc has not been allocated yet */
2452 free(fb, M_DEVBUF);
2453 return error;
2454 }
2455 ndesc = fb->fb_dmamap->dm_nsegs + 2;
2456
2457 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0, pkt->fp_dlen,
2458 BUS_DMASYNC_PREWRITE);
2459 }
2460
2461 fb->fb_nseg = ndesc;
2462 fb->fb_desc = fwohci_desc_get(sc, ndesc);
2463 if (fb->fb_desc == NULL) {
2464 free(fb, M_DEVBUF);
2465 return ENOBUFS;
2466 }
2467 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
2468 ((caddr_t)fb->fb_desc - (caddr_t)sc->sc_desc);
2469 fb->fb_m = pkt->fp_m;
2470 fb->fb_callback = pkt->fp_callback;
2471 fb->fb_statuscb = pkt->fp_statuscb;
2472 fb->fb_statusarg = pkt->fp_statusarg;
2473
2474 fd = fb->fb_desc;
2475 fd->fd_flags = OHCI_DESC_IMMED;
2476 fd->fd_reqcount = pkt->fp_hlen;
2477 fd->fd_data = 0;
2478 fd->fd_branch = 0;
2479 fd->fd_status = 0;
2480 if (fc->fc_ctx == OHCI_CTX_ASYNC_TX_RESPONSE) {
2481 i = 3; /* XXX: 3 sec */
2482 val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
2483 fd->fd_timestamp = ((val >> 12) & 0x1fff) |
2484 ((((val >> 25) + i) & 0x7) << 13);
2485 } else
2486 fd->fd_timestamp = 0;
2487 memcpy(fd + 1, pkt->fp_hdr, pkt->fp_hlen);
2488 for (i = 0; i < ndesc - 2; i++) {
2489 fd = fb->fb_desc + 2 + i;
2490 fd->fd_flags = 0;
2491 fd->fd_reqcount = fb->fb_dmamap->dm_segs[i].ds_len;
2492 fd->fd_data = fb->fb_dmamap->dm_segs[i].ds_addr;
2493 fd->fd_branch = 0;
2494 fd->fd_status = 0;
2495 fd->fd_timestamp = 0;
2496 }
2497 fd->fd_flags |= OHCI_DESC_LAST | OHCI_DESC_BRANCH;
2498 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
2499
2500 #ifdef FW_DEBUG
2501 DPRINTFN(1, ("fwohci_at_output: desc %ld",
2502 (long)(fb->fb_desc - sc->sc_desc)));
2503 for (i = 0; i < ndesc * 4; i++)
2504 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ",
2505 ((u_int32_t *)fb->fb_desc)[i]));
2506 DPRINTFN(1, ("\n"));
2507 #endif
2508
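	/*
	 * If the context is already running, link the new descriptor
	 * block onto the previous branch address and wake the context
	 * (if there is no previous block to branch from, the context is
	 * restarted instead); otherwise load CommandPtr with this block
	 * and set RUN.
	 */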
2509 val = OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx,
2510 OHCI_SUBREG_ContextControlClear);
2511
2512 if (val & OHCI_CTXCTL_RUN) {
2513 if (fc->fc_branch == NULL) {
2514 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2515 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2516 goto run;
2517 }
2518 *fc->fc_branch = fb->fb_daddr | ndesc;
2519 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2520 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2521 } else {
2522 run:
2523 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2524 OHCI_SUBREG_CommandPtr, fb->fb_daddr | ndesc);
2525 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2526 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
2527 }
2528 fc->fc_branch = &fd->fd_branch;
2529
2530 fc->fc_bufcnt++;
2531 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
2532 pkt->fp_m = NULL;
2533 return 0;
2534 }
2535
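/*
 * fwohci_at_done:
 *	Reclaim completed (or, with "force", all) buffers on an
 *	asynchronous transmit context: report the status word to the
 *	registered status callback, release descriptors and DMA maps,
 *	and hand the mbuf back via the completion callback or free it.
 */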
2536 static void
2537 fwohci_at_done(struct fwohci_softc *sc, struct fwohci_ctx *fc, int force)
2538 {
2539 struct fwohci_buf *fb;
2540 struct fwohci_desc *fd;
2541 struct fwohci_pkt pkt;
2542 int i;
2543
2544 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
2545 fd = fb->fb_desc;
2546 #ifdef FW_DEBUG
2547 DPRINTFN(1, ("fwohci_at_done: %sdesc %ld (%d)",
2548 force ? "force " : "", (long)(fd - sc->sc_desc),
2549 fb->fb_nseg));
2550 for (i = 0; i < fb->fb_nseg * 4; i++)
2551 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ",
2552 ((u_int32_t *)fd)[i]));
2553 DPRINTFN(1, ("\n"));
2554 #endif
2555 if (fb->fb_nseg > 2)
2556 fd += fb->fb_nseg - 1;
2557 if (!force && !(fd->fd_status & OHCI_CTXCTL_ACTIVE))
2558 break;
2559 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
2560 if (fc->fc_branch == &fd->fd_branch) {
2561 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2562 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2563 fc->fc_branch = NULL;
2564 for (i = 0; i < OHCI_LOOP; i++) {
2565 if (!(OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx,
2566 OHCI_SUBREG_ContextControlClear) &
2567 OHCI_CTXCTL_ACTIVE))
2568 break;
2569 DELAY(10);
2570 }
2571 }
2572
2573 if (fb->fb_statuscb) {
2574 memset(&pkt, 0, sizeof(pkt));
2575 pkt.fp_status = fd->fd_status;
2576 memcpy(pkt.fp_hdr, fd + 1, sizeof(pkt.fp_hdr[0]));
2577
2578 /* Indicate this is just returning the status bits. */
2579 pkt.fp_tcode = -1;
2580 (*fb->fb_statuscb)(sc, fb->fb_statusarg, &pkt);
2581 fb->fb_statuscb = NULL;
2582 fb->fb_statusarg = NULL;
2583 }
2584 fwohci_desc_put(sc, fb->fb_desc, fb->fb_nseg);
2585 if (fb->fb_nseg > 2)
2586 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
2587 fc->fc_bufcnt--;
2588 if (fb->fb_callback) {
2589 (*fb->fb_callback)(sc->sc_sc1394.sc1394_if, fb->fb_m);
2590 fb->fb_callback = NULL;
2591 } else if (fb->fb_m != NULL)
2592 m_freem(fb->fb_m);
2593 free(fb, M_DEVBUF);
2594 }
2595 }
2596
2597 /*
2598  * Asynchronous Transmit Response -- sent in response to a request packet.
2599 */
2600 static void
2601 fwohci_atrs_output(struct fwohci_softc *sc, int rcode, struct fwohci_pkt *req,
2602 struct fwohci_pkt *res)
2603 {
2604
2605 if (((*req->fp_trail & 0x001f0000) >> 16) !=
2606 OHCI_CTXCTL_EVENT_ACK_PENDING)
2607 return;
2608
2609 res->fp_hdr[0] = (req->fp_hdr[0] & 0x0000fc00) | 0x00000100;
2610 res->fp_hdr[1] = (req->fp_hdr[1] & 0xffff0000) | (rcode << 12);
2611 switch (req->fp_tcode) {
2612 case IEEE1394_TCODE_WRITE_REQ_QUAD:
2613 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
2614 res->fp_tcode = IEEE1394_TCODE_WRITE_RESP;
2615 res->fp_hlen = 12;
2616 break;
2617 case IEEE1394_TCODE_READ_REQ_QUAD:
2618 res->fp_tcode = IEEE1394_TCODE_READ_RESP_QUAD;
2619 res->fp_hlen = 16;
2620 res->fp_dlen = 0;
2621 if (res->fp_uio.uio_iovcnt == 1 && res->fp_iov[0].iov_len == 4)
2622 res->fp_hdr[3] =
2623 *(u_int32_t *)res->fp_iov[0].iov_base;
2624 res->fp_uio.uio_iovcnt = 0;
2625 break;
2626 case IEEE1394_TCODE_READ_REQ_BLOCK:
2627 case IEEE1394_TCODE_LOCK_REQ:
2628 if (req->fp_tcode == IEEE1394_TCODE_LOCK_REQ)
2629 res->fp_tcode = IEEE1394_TCODE_LOCK_RESP;
2630 else
2631 res->fp_tcode = IEEE1394_TCODE_READ_RESP_BLOCK;
2632 res->fp_hlen = 16;
2633 res->fp_dlen = res->fp_uio.uio_resid;
2634 res->fp_hdr[3] = res->fp_dlen << 16;
2635 break;
2636 }
2637 res->fp_hdr[0] |= (res->fp_tcode << 4);
2638 fwohci_at_output(sc, sc->sc_ctx_atrs, res);
2639 }
2640
2641 /*
2642 * APPLICATION LAYER SERVICES
2643 */
2644
2645 /*
2646 * Retrieve Global UID from GUID ROM
2647 */
2648 static int
2649 fwohci_guidrom_init(struct fwohci_softc *sc)
2650 {
2651 int i, n, off;
2652 u_int32_t val1, val2;
2653
2654 	/* Extract the Global UID. */
2656 val1 = OHCI_CSR_READ(sc, OHCI_REG_GUIDHi);
2657 val2 = OHCI_CSR_READ(sc, OHCI_REG_GUIDLo);
2658
2659 if (val1 != 0 || val2 != 0) {
2660 sc->sc_sc1394.sc1394_guid[0] = (val1 >> 24) & 0xff;
2661 sc->sc_sc1394.sc1394_guid[1] = (val1 >> 16) & 0xff;
2662 sc->sc_sc1394.sc1394_guid[2] = (val1 >> 8) & 0xff;
2663 sc->sc_sc1394.sc1394_guid[3] = (val1 >> 0) & 0xff;
2664 sc->sc_sc1394.sc1394_guid[4] = (val2 >> 24) & 0xff;
2665 sc->sc_sc1394.sc1394_guid[5] = (val2 >> 16) & 0xff;
2666 sc->sc_sc1394.sc1394_guid[6] = (val2 >> 8) & 0xff;
2667 sc->sc_sc1394.sc1394_guid[7] = (val2 >> 0) & 0xff;
2668 } else {
2669 val1 = OHCI_CSR_READ(sc, OHCI_REG_Version);
2670 if ((val1 & OHCI_Version_GUID_ROM) == 0)
2671 return -1;
2672 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom, OHCI_Guid_AddrReset);
2673 for (i = 0; i < OHCI_LOOP; i++) {
2674 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom);
2675 if (!(val1 & OHCI_Guid_AddrReset))
2676 break;
2677 DELAY(10);
2678 }
2679 off = OHCI_BITVAL(val1, OHCI_Guid_MiniROM) + 4;
2680 val2 = 0;
2681 for (n = 0; n < off + sizeof(sc->sc_sc1394.sc1394_guid); n++) {
2682 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom,
2683 OHCI_Guid_RdStart);
2684 for (i = 0; i < OHCI_LOOP; i++) {
2685 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom);
2686 if (!(val1 & OHCI_Guid_RdStart))
2687 break;
2688 DELAY(10);
2689 }
2690 if (n < off)
2691 continue;
2692 val1 = OHCI_BITVAL(val1, OHCI_Guid_RdData);
2693 sc->sc_sc1394.sc1394_guid[n - off] = val1;
2694 val2 |= val1;
2695 }
2696 if (val2 == 0)
2697 return -1;
2698 }
2699 return 0;
2700 }
2701
2702 /*
2703 * Initialization for Configuration ROM (no DMA context)
2704 */
2705
2706 #define CFR_MAXUNIT 20
2707
2708 struct configromctx {
2709 u_int32_t *ptr;
2710 int curunit;
2711 struct {
2712 u_int32_t *start;
2713 int length;
2714 u_int32_t *refer;
2715 int refunit;
2716 } unit[CFR_MAXUNIT];
2717 };
2718
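/*
 * The CFR_* macros below build the Configuration ROM in place:
 * CFR_START_UNIT() opens a directory/leaf, patching the offset (and
 * CRC) of any entry that refers to it; CFR_PUT_*() append quadlets;
 * CFR_PUT_REFER() emits a placeholder entry whose offset is filled in
 * when the referred unit is started; CFR_END_UNIT() stores the unit's
 * length and CRC-16 in its header quadlet.
 */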
2719 #define CFR_PUT_DATA4(cfr, d1, d2, d3, d4) \
2720 (*(cfr)->ptr++ = (((d1)<<24) | ((d2)<<16) | ((d3)<<8) | (d4)))
2721
2722 #define CFR_PUT_DATA1(cfr, d) (*(cfr)->ptr++ = (d))
2723
2724 #define CFR_PUT_VALUE(cfr, key, d) (*(cfr)->ptr++ = ((key)<<24) | (d))
2725
2726 #define CFR_PUT_CRC(cfr, n) \
2727 (*(cfr)->unit[n].start = ((cfr)->unit[n].length << 16) | \
2728 fwohci_crc16((cfr)->unit[n].start + 1, (cfr)->unit[n].length))
2729
2730 #define CFR_START_UNIT(cfr, n) \
2731 do { \
2732 if ((cfr)->unit[n].refer != NULL) { \
2733 *(cfr)->unit[n].refer |= \
2734 (cfr)->ptr - (cfr)->unit[n].refer; \
2735 CFR_PUT_CRC(cfr, (cfr)->unit[n].refunit); \
2736 } \
2737 (cfr)->curunit = (n); \
2738 (cfr)->unit[n].start = (cfr)->ptr++; \
2739 } while (0 /* CONSTCOND */)
2740
2741 #define CFR_PUT_REFER(cfr, key, n) \
2742 do { \
2743 (cfr)->unit[n].refer = (cfr)->ptr; \
2744 (cfr)->unit[n].refunit = (cfr)->curunit; \
2745 *(cfr)->ptr++ = (key) << 24; \
2746 } while (0 /* CONSTCOND */)
2747
2748 #define CFR_END_UNIT(cfr) \
2749 do { \
2750 (cfr)->unit[(cfr)->curunit].length = (cfr)->ptr - \
2751 ((cfr)->unit[(cfr)->curunit].start + 1); \
2752 CFR_PUT_CRC(cfr, (cfr)->curunit); \
2753 } while (0 /* CONSTCOND */)
2754
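/*
 * The CRC-16 used for Configuration ROM entries (IEEE 1212,
 * polynomial x^16 + x^12 + x^5 + 1), computed over `len' quadlets,
 * four bits per iteration.
 */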
2755 static u_int16_t
2756 fwohci_crc16(u_int32_t *ptr, int len)
2757 {
2758 int shift;
2759 u_int32_t crc, sum, data;
2760
2761 crc = 0;
2762 while (len-- > 0) {
2763 data = *ptr++;
2764 for (shift = 28; shift >= 0; shift -= 4) {
2765 sum = ((crc >> 12) ^ (data >> shift)) & 0x000f;
2766 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
2767 }
2768 crc &= 0xffff;
2769 }
2770 return crc;
2771 }
2772
2773 static void
2774 fwohci_configrom_init(struct fwohci_softc *sc)
2775 {
2776 int i, val;
2777 struct fwohci_buf *fb;
2778 u_int32_t *hdr;
2779 struct configromctx cfr;
2780
2781 fb = &sc->sc_buf_cnfrom;
2782 memset(&cfr, 0, sizeof(cfr));
2783 cfr.ptr = hdr = (u_int32_t *)fb->fb_buf;
2784
2785 /* headers */
2786 CFR_START_UNIT(&cfr, 0);
2787 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusId));
2788 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusOptions));
2789 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDHi));
2790 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDLo));
2791 CFR_END_UNIT(&cfr);
2792 /* copy info_length from crc_length */
2793 *hdr |= (*hdr & 0x00ff0000) << 8;
2794 OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMhdr, *hdr);
2795
2796 /* root directory */
2797 CFR_START_UNIT(&cfr, 1);
2798 CFR_PUT_VALUE(&cfr, 0x03, 0x00005e); /* vendor id */
2799 CFR_PUT_REFER(&cfr, 0x81, 2); /* textual descriptor offset */
2800 CFR_PUT_VALUE(&cfr, 0x0c, 0x0083c0); /* node capability */
2801 /* spt,64,fix,lst,drq */
2802 #ifdef INET
2803 CFR_PUT_REFER(&cfr, 0xd1, 3); /* IPv4 unit directory */
2804 #endif /* INET */
2805 #ifdef INET6
2806 CFR_PUT_REFER(&cfr, 0xd1, 4); /* IPv6 unit directory */
2807 #endif /* INET6 */
2808 CFR_END_UNIT(&cfr);
2809
2810 CFR_START_UNIT(&cfr, 2);
2811 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2812 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2813 CFR_PUT_DATA4(&cfr, 'N', 'e', 't', 'B');
2814 CFR_PUT_DATA4(&cfr, 'S', 'D', 0x00, 0x00);
2815 CFR_END_UNIT(&cfr);
2816
2817 #ifdef INET
2818 /* IPv4 unit directory */
2819 CFR_START_UNIT(&cfr, 3);
2820 CFR_PUT_VALUE(&cfr, 0x12, 0x00005e); /* unit spec id */
2821 CFR_PUT_REFER(&cfr, 0x81, 6); /* textual descriptor offset */
2822 CFR_PUT_VALUE(&cfr, 0x13, 0x000001); /* unit sw version */
2823 CFR_PUT_REFER(&cfr, 0x81, 7); /* textual descriptor offset */
2824 CFR_PUT_REFER(&cfr, 0x95, 8); /* Unit location */
2825 CFR_END_UNIT(&cfr);
2826
2827 CFR_START_UNIT(&cfr, 6);
2828 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2829 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2830 CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A');
2831 CFR_END_UNIT(&cfr);
2832
2833 CFR_START_UNIT(&cfr, 7);
2834 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2835 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2836 CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '4');
2837 CFR_END_UNIT(&cfr);
2838
2839 CFR_START_UNIT(&cfr, 8); /* Spec's valid addr range. */
2840 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2841 CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1));
2842 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2843 CFR_PUT_DATA1(&cfr, FW_FIFO_LO);
2844 CFR_END_UNIT(&cfr);
2845
2846 #endif /* INET */
2847
2848 #ifdef INET6
2849 /* IPv6 unit directory */
2850 CFR_START_UNIT(&cfr, 4);
2851 CFR_PUT_VALUE(&cfr, 0x12, 0x00005e); /* unit spec id */
2852 CFR_PUT_REFER(&cfr, 0x81, 9); /* textual descriptor offset */
2853 CFR_PUT_VALUE(&cfr, 0x13, 0x000002); /* unit sw version */
2854 /* XXX: TBA by IANA */
2855 CFR_PUT_REFER(&cfr, 0x81, 10); /* textual descriptor offset */
2856 CFR_PUT_REFER(&cfr, 0x95, 11); /* Unit location */
2857 CFR_END_UNIT(&cfr);
2858
2859 CFR_START_UNIT(&cfr, 9);
2860 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2861 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2862 CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A');
2863 CFR_END_UNIT(&cfr);
2864
2865 CFR_START_UNIT(&cfr, 10);
2866 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2867 CFR_PUT_DATA1(&cfr, 0);
2868 CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '6');
2869 CFR_END_UNIT(&cfr);
2870
2871 CFR_START_UNIT(&cfr, 11); /* Spec's valid addr range. */
2872 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2873 CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1));
2874 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2875 CFR_PUT_DATA1(&cfr, FW_FIFO_LO);
2876 CFR_END_UNIT(&cfr);
2877
2878 #endif /* INET6 */
2879
2880 fb->fb_off = cfr.ptr - hdr;
2881 #ifdef FW_DEBUG
2882 DPRINTF(("%s: Config ROM:", sc->sc_sc1394.sc1394_dev.dv_xname));
2883 for (i = 0; i < fb->fb_off; i++)
2884 DPRINTF(("%s%08x", i&7?" ":"\n ", hdr[i]));
2885 DPRINTF(("\n"));
2886 #endif /* FW_DEBUG */
2887
2888 /*
2889 	 * Convert to network byte order for the DMA engine.
2890 */
2891 for (i = 0; i < fb->fb_off; i++)
2892 HTONL(hdr[i]);
2893 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0,
2894 (caddr_t)cfr.ptr - fb->fb_buf, BUS_DMASYNC_PREWRITE);
2895
2896 OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMmap,
2897 fb->fb_dmamap->dm_segs[0].ds_addr);
2898
2899 /* This register is only valid on OHCI 1.1. */
2900 val = OHCI_CSR_READ(sc, OHCI_REG_Version);
2901 if ((OHCI_Version_GET_Version(val) == 1) &&
2902 (OHCI_Version_GET_Revision(val) == 1))
2903 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet,
2904 OHCI_HCControl_BIBImageValid);
2905
2906 /* Only allow quad reads of the rom. */
2907 for (i = 0; i < fb->fb_off; i++)
2908 fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD,
2909 CSR_BASE_HI, CSR_BASE_LO + CSR_CONFIG_ROM + (i * 4), 0,
2910 fwohci_configrom_input, NULL);
2911 }
2912
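/*
 * fwohci_configrom_input:
 *	Handler for quadlet read requests within the Configuration ROM
 *	region; replies with the requested quadlet from the in-memory
 *	ROM image.
 */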
2913 static int
2914 fwohci_configrom_input(struct fwohci_softc *sc, void *arg,
2915 struct fwohci_pkt *pkt)
2916 {
2917 struct fwohci_pkt res;
2918 u_int32_t loc, *rom;
2919
2920 /* This will be used as an array index so size accordingly. */
2921 loc = pkt->fp_hdr[2] - (CSR_BASE_LO + CSR_CONFIG_ROM);
2922 if ((loc & 0x03) != 0) {
2923 /* alignment error */
2924 return IEEE1394_RCODE_ADDRESS_ERROR;
2925 	} else {
2926 		loc /= 4;
2927 	}
2928 rom = (u_int32_t *)sc->sc_buf_cnfrom.fb_buf;
2929
2930 DPRINTFN(1, ("fwohci_configrom_input: ConfigRom[0x%04x]: 0x%08x\n", loc,
2931 ntohl(rom[loc])));
2932
2933 memset(&res, 0, sizeof(res));
2934 res.fp_hdr[3] = rom[loc];
2935 fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res);
2936 return -1;
2937 }
2938
2939 /*
2940 * SelfID buffer (no DMA context)
2941 */
2942 static void
2943 fwohci_selfid_init(struct fwohci_softc *sc)
2944 {
2945 struct fwohci_buf *fb;
2946
2947 fb = &sc->sc_buf_selfid;
2948 #ifdef DIAGNOSTIC
2949 if ((fb->fb_dmamap->dm_segs[0].ds_addr & 0x7ff) != 0)
2950 panic("fwohci_selfid_init: not aligned: %ld (%ld) %p",
2951 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_addr,
2952 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_len, fb->fb_buf);
2953 #endif
2954 memset(fb->fb_buf, 0, fb->fb_dmamap->dm_segs[0].ds_len);
2955 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0,
2956 fb->fb_dmamap->dm_segs[0].ds_len, BUS_DMASYNC_PREREAD);
2957
2958 OHCI_CSR_WRITE(sc, OHCI_REG_SelfIDBuffer,
2959 fb->fb_dmamap->dm_segs[0].ds_addr);
2960 }
2961
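/*
 * fwohci_selfid_input:
 *	Parse the self-ID buffer after a bus reset: verify the
 *	generation and the inverted-quadlet check words, determine the
 *	root and isochronous resource manager phy ids, and update our
 *	own node id and cycle-master setting.
 */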
2962 static int
2963 fwohci_selfid_input(struct fwohci_softc *sc)
2964 {
2965 int i;
2966 u_int32_t count, val, gen;
2967 u_int32_t *buf;
2968
2969 buf = (u_int32_t *)sc->sc_buf_selfid.fb_buf;
2970 val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount);
2971 again:
2972 if (val & OHCI_SelfID_Error) {
2973 printf("%s: SelfID Error\n", sc->sc_sc1394.sc1394_dev.dv_xname);
2974 return -1;
2975 }
2976 count = OHCI_BITVAL(val, OHCI_SelfID_Size);
2977
2978 bus_dmamap_sync(sc->sc_dmat, sc->sc_buf_selfid.fb_dmamap,
2979 0, count << 2, BUS_DMASYNC_POSTREAD);
2980 gen = OHCI_BITVAL(buf[0], OHCI_SelfID_Gen);
2981
2982 #ifdef FW_DEBUG
2983 DPRINTFN(1, ("%s: SelfID: 0x%08x", sc->sc_sc1394.sc1394_dev.dv_xname,
2984 val));
2985 for (i = 0; i < count; i++)
2986 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ", buf[i]));
2987 DPRINTFN(1, ("\n"));
2988 #endif /* FW_DEBUG */
2989
2990 for (i = 1; i < count; i += 2) {
2991 if (buf[i] != ~buf[i + 1])
2992 break;
2993 if (buf[i] & 0x00000001)
2994 continue; /* more pkt */
2995 if (buf[i] & 0x00800000)
2996 continue; /* external id */
2997 sc->sc_rootid = (buf[i] & 0x3f000000) >> 24;
2998 if ((buf[i] & 0x00400800) == 0x00400800)
2999 sc->sc_irmid = sc->sc_rootid;
3000 }
3001
3002 val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount);
3003 if (OHCI_BITVAL(val, OHCI_SelfID_Gen) != gen) {
3004 if (OHCI_BITVAL(val, OHCI_SelfID_Gen) !=
3005 OHCI_BITVAL(buf[0], OHCI_SelfID_Gen))
3006 goto again;
3007 DPRINTF(("%s: SelfID Gen mismatch (%d, %d)\n",
3008 sc->sc_sc1394.sc1394_dev.dv_xname, gen,
3009 OHCI_BITVAL(val, OHCI_SelfID_Gen)));
3010 return -1;
3011 }
3012 if (i != count) {
3013 printf("%s: SelfID corrupted (%d, 0x%08x, 0x%08x)\n",
3014 sc->sc_sc1394.sc1394_dev.dv_xname, i, buf[i], buf[i + 1]);
3015 #if 1
3016 if (i == 1 && buf[i] == 0 && buf[i + 1] == 0) {
3017 /*
3018 * XXX: CXD3222 sometimes fails to DMA
3019 * selfid packet??
3020 */
3021 sc->sc_rootid = (count - 1) / 2 - 1;
3022 sc->sc_irmid = sc->sc_rootid;
3023 } else
3024 #endif
3025 return -1;
3026 }
3027
3028 val = OHCI_CSR_READ(sc, OHCI_REG_NodeId);
3029 if ((val & OHCI_NodeId_IDValid) == 0) {
3030 sc->sc_nodeid = 0xffff; /* invalid */
3031 printf("%s: nodeid is invalid\n",
3032 sc->sc_sc1394.sc1394_dev.dv_xname);
3033 return -1;
3034 }
3035 sc->sc_nodeid = val & 0xffff;
3036 sc->sc_sc1394.sc1394_node_id = sc->sc_nodeid & OHCI_NodeId_NodeNumber;
3037
3038 DPRINTF(("%s: nodeid=0x%04x(%d), rootid=%d, irmid=%d\n",
3039 sc->sc_sc1394.sc1394_dev.dv_xname, sc->sc_nodeid,
3040 sc->sc_nodeid & OHCI_NodeId_NodeNumber, sc->sc_rootid,
3041 sc->sc_irmid));
3042
3043 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) > sc->sc_rootid)
3044 return -1;
3045
3046 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == sc->sc_rootid)
3047 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet,
3048 OHCI_LinkControl_CycleMaster);
3049 else
3050 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear,
3051 OHCI_LinkControl_CycleMaster);
3052 return 0;
3053 }
3054
3055 /*
3056  * Some CSRs are handled by the driver.
3057 */
3058 static void
3059 fwohci_csr_init(struct fwohci_softc *sc)
3060 {
3061 int i;
3062 static u_int32_t csr[] = {
3063 CSR_STATE_CLEAR, CSR_STATE_SET, CSR_SB_CYCLE_TIME,
3064 CSR_SB_BUS_TIME, CSR_SB_BUSY_TIMEOUT, CSR_SB_BUS_MANAGER_ID,
3065 CSR_SB_CHANNEL_AVAILABLE_HI, CSR_SB_CHANNEL_AVAILABLE_LO,
3066 CSR_SB_BROADCAST_CHANNEL
3067 };
3068
3069 for (i = 0; i < sizeof(csr) / sizeof(csr[0]); i++) {
3070 fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_QUAD,
3071 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input,
3072 NULL);
3073 fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD,
3074 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input,
3075 NULL);
3076 }
3077 sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] = 31; /*XXX*/
3078 }
3079
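/*
 * fwohci_csr_input:
 *	Quadlet read/write handler for the CSR core registers served
 *	from sc_csr[]; reads are answered directly, writes update the
 *	stored value.
 */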
3080 static int
3081 fwohci_csr_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3082 {
3083 struct fwohci_pkt res;
3084 u_int32_t reg;
3085
3086 /*
3087 * XXX need to do special functionality other than just r/w...
3088 */
3089 reg = pkt->fp_hdr[2] - CSR_BASE_LO;
3090
3091 if ((reg & 0x03) != 0) {
3092 /* alignment error */
3093 return IEEE1394_RCODE_ADDRESS_ERROR;
3094 }
3095 DPRINTFN(1, ("fwohci_csr_input: CSR[0x%04x]: 0x%08x", reg,
3096 *(u_int32_t *)(&sc->sc_csr[reg])));
3097 if (pkt->fp_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD) {
3098 DPRINTFN(1, (" -> 0x%08x\n",
3099 ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base)));
3100 *(u_int32_t *)&sc->sc_csr[reg] =
3101 ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base);
3102 } else {
3103 DPRINTFN(1, ("\n"));
3104 res.fp_hdr[3] = htonl(*(u_int32_t *)&sc->sc_csr[reg]);
3105 res.fp_iov[0].iov_base = &res.fp_hdr[3];
3106 res.fp_iov[0].iov_len = 4;
3107 res.fp_uio.uio_resid = 4;
3108 res.fp_uio.uio_iovcnt = 1;
3109 fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res);
3110 return -1;
3111 }
3112 return IEEE1394_RCODE_COMPLETE;
3113 }
3114
3115 /*
3116 * Mapping between nodeid and unique ID (EUI-64).
3117 *
3118  * Track old mappings and simply update their devices with the new IDs when
3119  * they match an existing EUI.  This allows proper renumbering of the bus.
3120 */
3121 static void
3122 fwohci_uid_collect(struct fwohci_softc *sc)
3123 {
3124 int i;
3125 struct fwohci_uidtbl *fu;
3126 struct ieee1394_softc *iea;
3127
3128 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node)
3129 iea->sc1394_node_id = 0xffff;
3130
3131 if (sc->sc_uidtbl != NULL)
3132 free(sc->sc_uidtbl, M_DEVBUF);
3133 sc->sc_uidtbl = malloc(sizeof(*fu) * (sc->sc_rootid + 1), M_DEVBUF,
3134 M_NOWAIT|M_ZERO); /* XXX M_WAITOK requires locks */
3135 if (sc->sc_uidtbl == NULL)
3136 return;
3137
3138 for (i = 0, fu = sc->sc_uidtbl; i <= sc->sc_rootid; i++, fu++) {
3139 if (i == (sc->sc_nodeid & OHCI_NodeId_NodeNumber)) {
3140 memcpy(fu->fu_uid, sc->sc_sc1394.sc1394_guid, 8);
3141 fu->fu_valid = 3;
3142
3143 iea = (struct ieee1394_softc *)sc->sc_sc1394.sc1394_if;
3144 if (iea) {
3145 iea->sc1394_node_id = i;
3146 DPRINTF(("%s: Updating nodeid to %d\n",
3147 iea->sc1394_dev.dv_xname,
3148 iea->sc1394_node_id));
3149 }
3150 } else {
3151 fu->fu_valid = 0;
3152 fwohci_uid_req(sc, i);
3153 }
3154 }
3155 if (sc->sc_rootid == 0)
3156 fwohci_check_nodes(sc);
3157 }
3158
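/*
 * fwohci_uid_req:
 *	Ask node `phyid' for its EUI-64 by issuing two quadlet reads of
 *	the Bus_Info_Block in its Configuration ROM (offsets 12 and 16);
 *	the answers arrive via fwohci_uid_input.
 */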
3159 static void
3160 fwohci_uid_req(struct fwohci_softc *sc, int phyid)
3161 {
3162 struct fwohci_pkt pkt;
3163
3164 memset(&pkt, 0, sizeof(pkt));
3165 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3166 pkt.fp_hlen = 12;
3167 pkt.fp_dlen = 0;
3168 pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
3169 (pkt.fp_tcode << 4);
3170 pkt.fp_hdr[1] = ((0xffc0 | phyid) << 16) | CSR_BASE_HI;
3171 pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 12;
3172 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid,
3173 sc->sc_tlabel, 0, fwohci_uid_input, (void *)0);
3174 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3175 fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
3176
3177 pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
3178 (pkt.fp_tcode << 4);
3179 pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 16;
3180 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid,
3181 sc->sc_tlabel, 0, fwohci_uid_input, (void *)1);
3182 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3183 fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
3184 }
3185
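/*
 * fwohci_uid_input:
 *	Response handler for the reads issued above: store one half of
 *	the node's EUI-64 and, once both halves are valid, either update
 *	the matching fwnode child or attach a new one.  When all nodes
 *	have answered, prune children whose nodes have disappeared.
 */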
3186 static int
3187 fwohci_uid_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *res)
3188 {
3189 struct fwohci_uidtbl *fu;
3190 struct ieee1394_softc *iea;
3191 struct ieee1394_attach_args fwa;
3192 int i, n, done, rcode, found;
3193
3194 found = 0;
3195
3196 n = (res->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
3197 rcode = (res->fp_hdr[1] & 0x0000f000) >> 12;
3198 if (rcode != IEEE1394_RCODE_COMPLETE ||
3199 sc->sc_uidtbl == NULL ||
3200 n > sc->sc_rootid)
3201 return 0;
3202 fu = &sc->sc_uidtbl[n];
3203 if (arg == 0) {
3204 memcpy(fu->fu_uid, res->fp_iov[0].iov_base, 4);
3205 fu->fu_valid |= 0x1;
3206 } else {
3207 memcpy(fu->fu_uid + 4, res->fp_iov[0].iov_base, 4);
3208 fu->fu_valid |= 0x2;
3209 }
3210 #ifdef FW_DEBUG
3211 if (fu->fu_valid == 0x3)
3212 DPRINTFN(1, ("fwohci_uid_input: "
3213 "Node %d, UID %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", n,
3214 fu->fu_uid[0], fu->fu_uid[1], fu->fu_uid[2], fu->fu_uid[3],
3215 fu->fu_uid[4], fu->fu_uid[5], fu->fu_uid[6], fu->fu_uid[7]));
3216 #endif
3217 if (fu->fu_valid == 0x3) {
3218 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node)
3219 if (memcmp(iea->sc1394_guid, fu->fu_uid, 8) == 0) {
3220 found = 1;
3221 iea->sc1394_node_id = n;
3222 DPRINTF(("%s: Updating nodeid to %d\n",
3223 iea->sc1394_dev.dv_xname,
3224 iea->sc1394_node_id));
3225 if (iea->sc1394_callback.sc1394_reset)
3226 iea->sc1394_callback.sc1394_reset(iea,
3227 iea->sc1394_callback.sc1394_resetarg);
3228 break;
3229 }
3230 if (!found) {
3231 strcpy(fwa.name, "fwnode");
3232 memcpy(fwa.uid, fu->fu_uid, 8);
3233 fwa.nodeid = n;
3234 iea = (struct ieee1394_softc *)
3235 config_found_sm(&sc->sc_sc1394.sc1394_dev, &fwa,
3236 fwohci_print, fwohci_submatch);
3237 if (iea != NULL)
3238 LIST_INSERT_HEAD(&sc->sc_nodelist, iea,
3239 sc1394_node);
3240 }
3241 }
3242 done = 1;
3243
3244 for (i = 0; i < sc->sc_rootid + 1; i++) {
3245 fu = &sc->sc_uidtbl[i];
3246 if (fu->fu_valid != 0x3) {
3247 done = 0;
3248 break;
3249 }
3250 }
3251 if (done)
3252 fwohci_check_nodes(sc);
3253
3254 return 0;
3255 }
3256
3257 static void
3258 fwohci_check_nodes(struct fwohci_softc *sc)
3259 {
3260 struct device *detach = NULL;
3261 struct ieee1394_softc *iea;
3262
3263 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node) {
3264
3265 /*
3266 * Have to defer detachment until the next
3267 * loop iteration since config_detach
3268 		 * frees the softc and the loop iterator
3269 * needs data from the softc to move
3270 * forward.
3271 */
3272
3273 if (detach) {
3274 config_detach(detach, 0);
3275 detach = NULL;
3276 }
3277 if (iea->sc1394_node_id == 0xffff) {
3278 detach = (struct device *)iea;
3279 LIST_REMOVE(iea, sc1394_node);
3280 }
3281 }
3282 if (detach)
3283 config_detach(detach, 0);
3284 }
3285
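/*
 * fwohci_uid_lookup:
 *	Map an EUI-64 to a phy id.  Returns IEEE1394_BCAST_PHY_ID for
 *	the broadcast address and -1 if the node is unknown, in which
 *	case a UID request is (re)issued for the missing entries.
 */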
3286 static int
3287 fwohci_uid_lookup(struct fwohci_softc *sc, const u_int8_t *uid)
3288 {
3289 struct fwohci_uidtbl *fu;
3290 int n;
3291 static const u_int8_t bcast[] =
3292 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3293
3294 fu = sc->sc_uidtbl;
3295 if (fu == NULL) {
3296 if (memcmp(uid, bcast, sizeof(bcast)) == 0)
3297 return IEEE1394_BCAST_PHY_ID;
3298 fwohci_uid_collect(sc); /* try to get */
3299 return -1;
3300 }
3301 for (n = 0; n <= sc->sc_rootid; n++, fu++) {
3302 if (fu->fu_valid == 0x3 && memcmp(fu->fu_uid, uid, 8) == 0)
3303 return n;
3304 }
3305 if (memcmp(uid, bcast, sizeof(bcast)) == 0)
3306 return IEEE1394_BCAST_PHY_ID;
3307 for (n = 0, fu = sc->sc_uidtbl; n <= sc->sc_rootid; n++, fu++) {
3308 if (fu->fu_valid != 0x3) {
3309 /*
3310 * XXX: need timer before retransmission
3311 */
3312 fwohci_uid_req(sc, n);
3313 }
3314 }
3315 return -1;
3316 }
3317
3318 /*
3319 * functions to support network interface
3320 */
3321 static int
3322 fwohci_if_inreg(struct device *self, u_int32_t offhi, u_int32_t offlo,
3323 void (*handler)(struct device *, struct mbuf *))
3324 {
3325 struct fwohci_softc *sc = (struct fwohci_softc *)self;
3326
3327 fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_BLOCK, offhi, offlo, 0,
3328 handler ? fwohci_if_input : NULL, handler);
3329 fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA,
3330 (sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] & IEEE1394_ISOCH_MASK) |
3331 OHCI_ASYNC_STREAM,
3332 1 << IEEE1394_TAG_GASP, 0,
3333 handler ? fwohci_if_input : NULL, handler);
3334 return 0;
3335 }
3336
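/*
 * fwohci_if_input:
 *	Receive frontend for the network interface: prepend a 16 byte
 *	pseudo header (sender EUI-64, the request header words or zero
 *	for stream packets, plus phy id and speed), copy the payload
 *	into the mbuf and pass it to the child's input handler.
 */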
3337 static int
3338 fwohci_if_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3339 {
3340 int n, len;
3341 struct mbuf *m;
3342 struct iovec *iov;
3343 void (*handler)(struct device *, struct mbuf *) = arg;
3344
3345 #ifdef FW_DEBUG
3346 int i;
3347 DPRINTFN(1, ("fwohci_if_input: tcode=0x%x, dlen=%d", pkt->fp_tcode,
3348 pkt->fp_dlen));
3349 for (i = 0; i < pkt->fp_hlen/4; i++)
3350 DPRINTFN(2, ("%s%08x", i?" ":"\n ", pkt->fp_hdr[i]));
3351 DPRINTFN(2, ("$"));
3352 for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){
3353 iov = &pkt->fp_iov[n];
3354 for (i = 0; i < iov->iov_len; i++)
3355 DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ",
3356 ((u_int8_t *)iov->iov_base)[i]));
3357 DPRINTFN(2, ("$"));
3358 }
3359 DPRINTFN(1, ("\n"));
3360 #endif /* FW_DEBUG */
3361 len = pkt->fp_dlen;
3362 MGETHDR(m, M_DONTWAIT, MT_DATA);
3363 if (m == NULL)
3364 return IEEE1394_RCODE_COMPLETE;
3365 m->m_len = 16;
3366 if (len + m->m_len > MHLEN) {
3367 MCLGET(m, M_DONTWAIT);
3368 if ((m->m_flags & M_EXT) == 0) {
3369 m_freem(m);
3370 return IEEE1394_RCODE_COMPLETE;
3371 }
3372 }
3373 n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
3374 if (sc->sc_uidtbl == NULL || n > sc->sc_rootid ||
3375 sc->sc_uidtbl[n].fu_valid != 0x3) {
3376 printf("%s: packet from unknown node: phy id %d\n",
3377 sc->sc_sc1394.sc1394_dev.dv_xname, n);
3378 m_freem(m);
3379 fwohci_uid_req(sc, n);
3380 return IEEE1394_RCODE_COMPLETE;
3381 }
3382 memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8);
3383 if (pkt->fp_tcode == IEEE1394_TCODE_STREAM_DATA) {
3384 m->m_flags |= M_BCAST;
3385 mtod(m, u_int32_t *)[2] = mtod(m, u_int32_t *)[3] = 0;
3386 } else {
3387 mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]);
3388 mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]);
3389 }
3390 mtod(m, u_int8_t *)[8] = n; /*XXX: node id for debug */
3391 mtod(m, u_int8_t *)[9] =
3392 (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) &
3393 ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1);
3394
3395 m->m_pkthdr.rcvif = NULL; /* set in child */
3396 m->m_pkthdr.len = len + m->m_len;
3397 	/*
3398 	 * We could map the receive buffer into an external mbuf instead
3399 	 * of copying here.  But the asynchronous receive buffer must
3400 	 * operate in buffer-fill mode, so each receive buffer would be
3401 	 * shared by multiple mbufs; if the upper layer does not free an
3402 	 * mbuf soon (e.g. the application is suspended), the buffer
3403 	 * would have to be reallocated.  The isochronous buffer operates
3404 	 * in packet-buffer mode, so it is easy to map the receive buffer
3405 	 * to an external mbuf.  But it is used for broadcast/multicast
3406 	 * only, which is not expected to be performance sensitive for now.
3407 	 * XXX: The performance may matter in the multicast case,
3408 	 * so we should revisit this later.
3409 	 * -- onoe
3410 	 */
3411 n = 0;
3412 iov = pkt->fp_uio.uio_iov;
3413 while (len > 0) {
3414 memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base,
3415 iov->iov_len);
3416 m->m_len += iov->iov_len;
3417 len -= iov->iov_len;
3418 iov++;
3419 }
3420 (*handler)(sc->sc_sc1394.sc1394_if, m);
3421 return IEEE1394_RCODE_COMPLETE;
3422 }
3423
3424 static int
3425 fwohci_if_input_iso(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3426 {
3427 int n, len;
3428 int chan, tag;
3429 struct mbuf *m;
3430 struct iovec *iov;
3431 void (*handler)(struct device *, struct mbuf *) = arg;
3432 #ifdef FW_DEBUG
3433 int i;
3434 #endif
3435
3436 chan = (pkt->fp_hdr[0] & 0x00003f00) >> 8;
3437 tag = (pkt->fp_hdr[0] & 0x0000c000) >> 14;
3438 #ifdef FW_DEBUG
3439 DPRINTFN(1, ("fwohci_if_input_iso: "
3440 "tcode=0x%x, chan=%d, tag=%x, dlen=%d",
3441 pkt->fp_tcode, chan, tag, pkt->fp_dlen));
3442 for (i = 0; i < pkt->fp_hlen/4; i++)
3443 DPRINTFN(2, ("%s%08x", i?" ":"\n\t", pkt->fp_hdr[i]));
3444 DPRINTFN(2, ("$"));
3445 for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){
3446 iov = &pkt->fp_iov[n];
3447 for (i = 0; i < iov->iov_len; i++)
3448 DPRINTFN(2, ("%s%02x",
3449 (i%32)?((i%4)?"":" "):"\n\t",
3450 ((u_int8_t *)iov->iov_base)[i]));
3451 DPRINTFN(2, ("$"));
3452 }
3453 DPRINTFN(2, ("\n"));
3454 #endif /* FW_DEBUG */
3455 len = pkt->fp_dlen;
3456 MGETHDR(m, M_DONTWAIT, MT_DATA);
3457 if (m == NULL)
3458 return IEEE1394_RCODE_COMPLETE;
3459 m->m_len = 16;
3460 if (m->m_len + len > MHLEN) {
3461 MCLGET(m, M_DONTWAIT);
3462 if ((m->m_flags & M_EXT) == 0) {
3463 m_freem(m);
3464 return IEEE1394_RCODE_COMPLETE;
3465 }
3466 }
3467
3468 m->m_flags |= M_BCAST;
3469
3470 if (tag == IEEE1394_TAG_GASP) {
3471 n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
3472 if (sc->sc_uidtbl == NULL || n > sc->sc_rootid ||
3473 sc->sc_uidtbl[n].fu_valid != 0x3) {
3474 printf("%s: packet from unknown node: phy id %d\n",
3475 sc->sc_sc1394.sc1394_dev.dv_xname, n);
3476 m_freem(m);
3477 return IEEE1394_RCODE_COMPLETE;
3478 }
3479 memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8);
3480 mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]);
3481 mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]);
3482 mtod(m, u_int8_t *)[8] = n; /*XXX: node id for debug */
3483 mtod(m, u_int8_t *)[9] =
3484 (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) &
3485 ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1);
3486 }
3487 mtod(m, u_int8_t *)[14] = chan;
3488 mtod(m, u_int8_t *)[15] = tag;
3489
3490
3491 m->m_pkthdr.rcvif = NULL; /* set in child */
3492 m->m_pkthdr.len = len + m->m_len;
3493 	/*
3494 	 * We could map the receive buffer into an external mbuf instead
3495 	 * of copying here.  But the asynchronous receive buffer must
3496 	 * operate in buffer-fill mode, so each receive buffer would be
3497 	 * shared by multiple mbufs; if the upper layer does not free an
3498 	 * mbuf soon (e.g. the application is suspended), the buffer
3499 	 * would have to be reallocated.  The isochronous buffer operates
3500 	 * in packet-buffer mode, so it is easy to map the receive buffer
3501 	 * to an external mbuf.  But it is used for broadcast/multicast
3502 	 * only, which is not expected to be performance sensitive for now.
3503 	 * XXX: The performance may matter in the multicast case,
3504 	 * so we should revisit this later.
3505 	 * -- onoe
3506 	 */
3507 n = 0;
3508 iov = pkt->fp_uio.uio_iov;
3509 while (len > 0) {
3510 memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base,
3511 iov->iov_len);
3512 m->m_len += iov->iov_len;
3513 len -= iov->iov_len;
3514 iov++;
3515 }
3516 (*handler)(sc->sc_sc1394.sc1394_if, m);
3517 return IEEE1394_RCODE_COMPLETE;
3518 }
3519
3520
3521
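/*
 * fwohci_if_output:
 *	Transmit frontend for the network interface.  Broadcast/multicast
 *	frames are sent as asynchronous stream packets on the broadcast
 *	channel with a GASP header; unicast frames become block write
 *	requests addressed with the destination node and offset taken
 *	from the 16 byte pseudo header.
 */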
3522 static int
3523 fwohci_if_output(struct device *self, struct mbuf *m0,
3524 void (*callback)(struct device *, struct mbuf *))
3525 {
3526 struct fwohci_softc *sc = (struct fwohci_softc *)self;
3527 struct fwohci_pkt pkt;
3528 u_int8_t *p;
3529 int n, error, spd, hdrlen, maxrec;
3530 #ifdef FW_DEBUG
3531 struct mbuf *m;
3532 #endif
3533
3534 p = mtod(m0, u_int8_t *);
3535 if (m0->m_flags & (M_BCAST | M_MCAST)) {
3536 spd = IEEE1394_SPD_S100; /*XXX*/
3537 maxrec = 512; /*XXX*/
3538 hdrlen = 8;
3539 } else {
3540 n = fwohci_uid_lookup(sc, p);
3541 if (n < 0) {
3542 printf("%s: nodeid unknown:"
3543 " %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
3544 sc->sc_sc1394.sc1394_dev.dv_xname,
3545 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
3546 error = EHOSTUNREACH;
3547 goto end;
3548 }
3549 if (n == IEEE1394_BCAST_PHY_ID) {
3550 printf("%s: broadcast with !M_MCAST\n",
3551 sc->sc_sc1394.sc1394_dev.dv_xname);
3552 #ifdef FW_DEBUG
3553 DPRINTFN(2, ("packet:"));
3554 for (m = m0; m != NULL; m = m->m_next) {
3555 for (n = 0; n < m->m_len; n++)
3556 DPRINTFN(2, ("%s%02x", (n%32)?
3557 ((n%4)?"":" "):"\n ",
3558 mtod(m, u_int8_t *)[n]));
3559 DPRINTFN(2, ("$"));
3560 }
3561 DPRINTFN(2, ("\n"));
3562 #endif
3563 error = EHOSTUNREACH;
3564 goto end;
3565 }
3566 maxrec = 2 << p[8];
3567 spd = p[9];
3568 hdrlen = 0;
3569 }
3570 if (spd > sc->sc_sc1394.sc1394_link_speed) {
3571 DPRINTF(("fwohci_if_output: spd (%d) is faster than %d\n",
3572 spd, sc->sc_sc1394.sc1394_link_speed));
3573 spd = sc->sc_sc1394.sc1394_link_speed;
3574 }
3575 if (maxrec > (512 << spd)) {
3576 DPRINTF(("fwohci_if_output: maxrec (%d) is larger for spd (%d)"
3577 "\n", maxrec, spd));
3578 maxrec = 512 << spd;
3579 }
3580 while (maxrec > sc->sc_sc1394.sc1394_max_receive) {
3581 DPRINTF(("fwohci_if_output: maxrec (%d) is larger than"
3582 " %d\n", maxrec, sc->sc_sc1394.sc1394_max_receive));
3583 maxrec >>= 1;
3584 }
3585 if (maxrec < 512) {
3586 DPRINTF(("fwohci_if_output: maxrec (%d) is smaller than "
3587 "minimum\n", maxrec));
3588 maxrec = 512;
3589 }
3590
3591 m_adj(m0, 16 - hdrlen);
3592 if (m0->m_pkthdr.len > maxrec) {
3593 DPRINTF(("fwohci_if_output: packet too big: hdr %d, pktlen "
3594 "%d, maxrec %d\n", hdrlen, m0->m_pkthdr.len, maxrec));
3595 error = E2BIG; /*XXX*/
3596 goto end;
3597 }
3598
3599 memset(&pkt, 0, sizeof(pkt));
3600 pkt.fp_uio.uio_iov = pkt.fp_iov;
3601 pkt.fp_uio.uio_segflg = UIO_SYSSPACE;
3602 pkt.fp_uio.uio_rw = UIO_WRITE;
3603 if (m0->m_flags & (M_BCAST | M_MCAST)) {
3604 /* construct GASP header */
3605 p = mtod(m0, u_int8_t *);
3606 p[0] = sc->sc_nodeid >> 8;
3607 p[1] = sc->sc_nodeid & 0xff;
3608 p[2] = 0x00; p[3] = 0x00; p[4] = 0x5e;
3609 p[5] = 0x00; p[6] = 0x00; p[7] = 0x01;
3610 pkt.fp_tcode = IEEE1394_TCODE_STREAM_DATA;
3611 pkt.fp_hlen = 8;
3612 pkt.fp_hdr[0] = (spd << 16) | (IEEE1394_TAG_GASP << 14) |
3613 ((sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] &
3614 OHCI_NodeId_NodeNumber) << 8);
3615 pkt.fp_hdr[1] = m0->m_pkthdr.len << 16;
3616 } else {
3617 pkt.fp_tcode = IEEE1394_TCODE_WRITE_REQ_BLOCK;
3618 pkt.fp_hlen = 16;
3619 pkt.fp_hdr[0] = 0x00800100 | (sc->sc_tlabel << 10) |
3620 (spd << 16);
3621 pkt.fp_hdr[1] =
3622 (((sc->sc_nodeid & OHCI_NodeId_BusNumber) | n) << 16) |
3623 (p[10] << 8) | p[11];
3624 pkt.fp_hdr[2] = (p[12]<<24) | (p[13]<<16) | (p[14]<<8) | p[15];
3625 pkt.fp_hdr[3] = m0->m_pkthdr.len << 16;
3626 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3627 }
3628 pkt.fp_hdr[0] |= (pkt.fp_tcode << 4);
3629 pkt.fp_dlen = m0->m_pkthdr.len;
3630 pkt.fp_m = m0;
3631 pkt.fp_callback = callback;
3632 error = fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
3633 m0 = pkt.fp_m;
3634 end:
3635 if (m0 != NULL) {
3636 if (callback)
3637 (*callback)(sc->sc_sc1394.sc1394_if, m0);
3638 else
3639 m_freem(m0);
3640 }
3641 return error;
3642 }
3643
3644 /*
3645 * High level routines to provide abstraction to attaching layers to
3646 * send/receive data.
3647 */
3648
3649 /*
3650 * These break down into 4 routines as follows:
3651 *
3652 * int fwohci_read(struct ieee1394_abuf *)
3653 *
3654 * This routine will attempt to read a region from the requested node.
3655  * A callback must be provided which will be called when either the read
3656  * completes or an unrecoverable error occurs.  This is mainly a convenience
3657 * routine since it will encapsulate retrying a region as quadlet vs. block
3658 * reads and recombining all the returned data. This could also be done with a
3659 * series of write/inreg's for each packet sent.
3660 *
3661 * int fwohci_write(struct ieee1394_abuf *)
3662 *
3663  * The workhorse entry point for putting packets on the bus.  This is the
3664 * generalized interface for fwnode/etc code to put packets out onto the bus.
3665 * It accepts all standard ieee1394 tcodes (XXX: only a few today) and
3666 * optionally will callback via a func pointer to the calling code with the
3667 * resulting ACK code from the packet. If the ACK code is to be ignored (i.e.
3668  * no cb) then the write routine will take care of freeing the abuf since the
3669 * fwnode/etc code won't have any knowledge of when to do this. This allows for
3670 * simple one-off packets to be sent from the upper-level code without worrying
3671 * about a callback for cleanup.
3672 *
3673 * int fwohci_inreg(struct ieee1394_abuf *, int)
3674 *
3675 * This is very simple. It evaluates the abuf passed in and registers an
3676 * internal handler as the callback for packets received for that operation.
3677 * The integer argument specifies whether, on a block read/write operation,
3678 * sub-regions may be read/written (in block form) as well.
3679 *
3680 * XXX: This whole structure needs to be redone as a list of regions and
3681 * operations allowed on those regions.
3682 *
3683 * int fwohci_unreg(struct ieee1394_abuf *, int)
3684 *
3685 * This simply unregisters the respective callback done via inreg for items
3686 * which only need to register an area for a one-time operation (like a status
3687 * buffer a remote node will write to when the current operation is done). The
3688 * int argument specifies the same behavior as inreg, except in reverse (i.e.
3689 * it unregisters).
3690 */
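/*
 * Illustrative only: a minimal sketch of how an attaching layer might use
 * the read interface described above.  The helper names and the direct
 * call to fwohci_read() are hypothetical (upper layers normally reach
 * these routines through function pointers installed at attach time), and
 * the callback signature is assumed from the way ab_cb is invoked below.
 */
#if 0
static void
example_read_cb(struct ieee1394_abuf *ab, int rcode)
{
	/* On success ab->ab_data holds ab->ab_retlen bytes of data. */
	if (rcode != IEEE1394_RCODE_COMPLETE)
		printf("example_read_cb: read failed, rcode %d\n", rcode);
	free(ab->ab_data, M_1394DATA);
	free(ab, M_1394DATA);
}

static void
example_read(struct ieee1394_softc *sc)
{
	struct ieee1394_abuf *ab;

	ab = malloc(sizeof(*ab), M_1394DATA, M_WAITOK|M_ZERO);
	ab->ab_data = malloc(1024, M_1394DATA, M_WAITOK);
	ab->ab_req = sc;
	ab->ab_addr = 0x0000fffff0000400ULL;	/* e.g. config ROM area */
	ab->ab_length = 1024;
	ab->ab_cb = example_read_cb;		/* required by fwohci_read() */
	(void)fwohci_read(ab);
}
#endif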
3691
3692 static int
3693 fwohci_read(struct ieee1394_abuf *ab)
3694 {
3695 struct fwohci_pkt pkt;
3696 struct ieee1394_softc *sc = ab->ab_req;
3697 struct fwohci_softc *psc =
3698 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
3699 struct fwohci_cb *fcb;
3700 u_int32_t high, lo;
3701 int rv, tcode;
3702
3703 /* Have to have a callback when reading. */
3704 if (ab->ab_cb == NULL)
3705 return -1;
3706
3707 fcb = malloc(sizeof(struct fwohci_cb), M_DEVBUF, M_WAITOK);
3708 fcb->ab = ab;
3709 fcb->count = 0;
3710 fcb->abuf_valid = 1;
3711
3712 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3713 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3714
3715 memset(&pkt, 0, sizeof(pkt));
3716 pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3717 pkt.fp_hdr[2] = lo;
3718 pkt.fp_dlen = 0;
3719
3720 if (ab->ab_length == 4) {
3721 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3722 tcode = IEEE1394_TCODE_READ_RESP_QUAD;
3723 pkt.fp_hlen = 12;
3724 } else {
3725 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_BLOCK;
3726 pkt.fp_hlen = 16;
3727 tcode = IEEE1394_TCODE_READ_RESP_BLOCK;
3728 pkt.fp_hdr[3] = (ab->ab_length << 16);
3729 }
3730 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3731 (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4);
3732
3733 pkt.fp_statusarg = fcb;
3734 pkt.fp_statuscb = fwohci_read_resp;
3735
3736 rv = fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id,
3737 psc->sc_tlabel, 0, fwohci_read_resp, fcb);
3738 if (rv)
3739 return rv;
3740 rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt);
3741 if (rv)
3742 fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id,
3743 psc->sc_tlabel, 0, NULL, NULL);
3744 psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f;
3745 fcb->count = 1;
3746 return rv;
3747 }
3748
3749 static int
3750 fwohci_write(struct ieee1394_abuf *ab)
3751 {
3752 struct fwohci_pkt pkt;
3753 struct ieee1394_softc *sc = ab->ab_req;
3754 struct fwohci_softc *psc =
3755 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
3756 u_int32_t high, lo;
3757 int rv;
3758
3759 if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) {
3760 if (ab->ab_length > IEEE1394_MAX_REC(sc->sc1394_max_receive)) {
3761 DPRINTF(("Packet too large: %d\n", ab->ab_length));
3762 return E2BIG;
3763 }
3764 }
3765
3766 if (ab->ab_length >
3767 IEEE1394_MAX_ASYNCH_FOR_SPEED(sc->sc1394_link_speed)) {
3768 DPRINTF(("Packet too large: %d\n", ab->ab_length));
3769 return E2BIG;
3770 }
3771
3772 if (ab->ab_data && ab->ab_uio)
3773 panic("Can't call with uio and data set");
3774 if ((ab->ab_data == NULL) && (ab->ab_uio == NULL))
3775 panic("One of either ab_data or ab_uio must be set");
3776
3777 memset(&pkt, 0, sizeof(pkt));
3778
3779 pkt.fp_tcode = ab->ab_tcode;
3780 if (ab->ab_data) {
3781 pkt.fp_uio.uio_iov = pkt.fp_iov;
3782 pkt.fp_uio.uio_segflg = UIO_SYSSPACE;
3783 pkt.fp_uio.uio_rw = UIO_WRITE;
3784 } else
3785 memcpy(&pkt.fp_uio, ab->ab_uio, sizeof(struct uio));
3786
3787 pkt.fp_statusarg = ab;
3788 pkt.fp_statuscb = fwohci_write_ack;
3789
3790 switch (ab->ab_tcode) {
3791 case IEEE1394_TCODE_WRITE_RESP:
3792 pkt.fp_hlen = 12;
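		/* FALLTHROUGH */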
3793 case IEEE1394_TCODE_READ_RESP_QUAD:
3794 case IEEE1394_TCODE_READ_RESP_BLOCK:
3795 if (!pkt.fp_hlen)
3796 pkt.fp_hlen = 16;
3797 high = ab->ab_retlen;
3798 ab->ab_retlen = 0;
3799 lo = 0;
3800 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3801 (ab->ab_tlabel << 10) | (pkt.fp_tcode << 4);
3802 break;
3803 default:
3804 pkt.fp_hlen = 16;
3805 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3806 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3807 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3808 (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4);
3809 psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f;
3810 break;
3811 }
3812
3813 pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3814 pkt.fp_hdr[2] = lo;
3815 if (pkt.fp_hlen == 16) {
3816 if (ab->ab_length == 4) {
3817 pkt.fp_hdr[3] = ab->ab_data[0];
3818 pkt.fp_dlen = 0;
3819 } else {
3820 pkt.fp_hdr[3] = (ab->ab_length << 16);
3821 pkt.fp_dlen = ab->ab_length;
3822 if (ab->ab_data) {
3823 pkt.fp_uio.uio_iovcnt = 1;
3824 pkt.fp_uio.uio_resid = ab->ab_length;
3825 pkt.fp_iov[0].iov_base = ab->ab_data;
3826 pkt.fp_iov[0].iov_len = ab->ab_length;
3827 }
3828 }
3829 }
3830 switch (ab->ab_tcode) {
3831 case IEEE1394_TCODE_WRITE_RESP:
3832 case IEEE1394_TCODE_READ_RESP_QUAD:
3833 case IEEE1394_TCODE_READ_RESP_BLOCK:
3834 rv = fwohci_at_output(psc, psc->sc_ctx_atrs, &pkt);
3835 break;
3836 default:
3837 rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt);
3838 break;
3839 }
3840 return rv;
3841 }
3842
3843 static int
3844 fwohci_read_resp(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3845 {
3846 struct fwohci_cb *fcb = arg;
3847 struct ieee1394_abuf *ab = fcb->ab;
3848 struct fwohci_pkt newpkt;
3849 u_int32_t *cur, high, lo;
3850 int i, tcode, rcode, status, rv;
3851
3852 /*
3853	 * Both the ACK handling and the normal response callbacks are handled
3854	 * here.  The main reason for this is the variety of error conditions
3855	 * that can occur when block reading some areas and the different ways
3856	 * those get reported back to the calling station (ACK codes, response
3857	 * codes, etc.), which would be much more difficult to process if the
3858	 * two were not handled in one place.
3859 */
3860
3861 /* Check for status packet. */
3862
3863 if (pkt->fp_tcode == -1) {
3864 status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK;
3865 rcode = -1;
3866 tcode = (pkt->fp_hdr[0] >> 4) & 0xf;
3867 if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
3868 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))
3869 DPRINTFN(2, ("Got status packet: 0x%02x\n",
3870 (unsigned int)status));
3871 fcb->count--;
3872
3873 /*
3874	 * Got all the ACKs back and the buffer is invalid (i.e. the
3875	 * callback has been called). Clean up.
3876 */
3877
3878 if (fcb->abuf_valid == 0) {
3879 if (fcb->count == 0)
3880 free(fcb, M_DEVBUF);
3881 return IEEE1394_RCODE_COMPLETE;
3882 }
3883 } else {
3884 status = -1;
3885 tcode = pkt->fp_tcode;
3886 rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12;
3887 }
3888
3889 /*
3890	 * Some areas (like the config ROM) want to be read as quadlets only.
3891	 *
3892	 * The cases currently handled are:
3893	 *
3894	 * Got an ACK_TYPE_ERROR on a block read.
3895	 *
3896	 * Got either RCODE_TYPE or RCODE_ADDRESS errors in a block read
3897	 * response.
3898	 *
3899	 * In both cases construct a new packet for a quadlet read and let
3900	 * multi_resp handle the iteration over the space.
3901 */
3902
3903 if (((status == OHCI_CTXCTL_EVENT_ACK_TYPE_ERROR) &&
3904 (tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) ||
3905 (((rcode == IEEE1394_RCODE_TYPE_ERROR) ||
3906 (rcode == IEEE1394_RCODE_ADDRESS_ERROR)) &&
3907 (tcode == IEEE1394_TCODE_READ_RESP_BLOCK))) {
3908
3909 /* Read the area in quadlet chunks (internally track this). */
3910
3911 memset(&newpkt, 0, sizeof(newpkt));
3912
3913 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3914 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3915
3916 newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3917 newpkt.fp_hlen = 12;
3918 newpkt.fp_dlen = 0;
3919 newpkt.fp_hdr[1] =
3920 ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3921 newpkt.fp_hdr[2] = lo;
3922 newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
3923 (newpkt.fp_tcode << 4);
3924
3925 rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
3926 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0,
3927 fwohci_read_multi_resp, fcb);
3928 if (rv) {
3929 (*ab->ab_cb)(ab, -1);
3930 goto cleanup;
3931 }
3932 newpkt.fp_statusarg = fcb;
3933 newpkt.fp_statuscb = fwohci_read_resp;
3934 rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt);
3935 if (rv) {
3936 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
3937 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0, NULL,
3938 NULL);
3939 (*ab->ab_cb)(ab, -1);
3940 goto cleanup;
3941 }
3942 fcb->count++;
3943 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3944 return IEEE1394_RCODE_COMPLETE;
3945 } else if ((rcode != -1) || ((status != -1) &&
3946 (status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
3947 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))) {
3948
3949 /*
3950 * Recombine all the iov data into 1 chunk for higher
3951 * level code.
3952 */
3953
3954 if (rcode != -1) {
3955 cur = ab->ab_data;
3956 for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) {
3957 /*
3958				 * Make sure not to exceed the buffer
3959				 * allocated for the return data.
3960 */
3961 if ((ab->ab_retlen + pkt->fp_iov[i].iov_len) >
3962 ab->ab_length) {
3963 memcpy(cur, pkt->fp_iov[i].iov_base,
3964 (ab->ab_length - ab->ab_retlen));
3965 ab->ab_retlen = ab->ab_length;
3966 break;
3967 }
3968 memcpy(cur, pkt->fp_iov[i].iov_base,
3969 pkt->fp_iov[i].iov_len);
3970 cur += pkt->fp_iov[i].iov_len;
3971 ab->ab_retlen += pkt->fp_iov[i].iov_len;
3972 }
3973 }
3974 if (status != -1)
3975 /* XXX: Need a complete tlabel interface. */
3976 for (i = 0; i < 64; i++)
3977 fwohci_handler_set(sc,
3978 IEEE1394_TCODE_READ_RESP_QUAD,
3979 ab->ab_req->sc1394_node_id, i, 0, NULL,
3980 NULL);
3981 (*ab->ab_cb)(ab, rcode);
3982 goto cleanup;
3983 } else
3984 /* Good ack packet. */
3985 return IEEE1394_RCODE_COMPLETE;
3986
3987 /* Can't get here unless ab->ab_cb has been called. */
3988
3989 cleanup:
3990 fcb->abuf_valid = 0;
3991 if (fcb->count == 0)
3992 free(fcb, M_DEVBUF);
3993 return IEEE1394_RCODE_COMPLETE;
3994 }
3995
3996 static int
3997 fwohci_read_multi_resp(struct fwohci_softc *sc, void *arg,
3998 struct fwohci_pkt *pkt)
3999 {
4000 struct fwohci_cb *fcb = arg;
4001 struct ieee1394_abuf *ab = fcb->ab;
4002 struct fwohci_pkt newpkt;
4003 u_int32_t high, lo;
4004 int rcode, rv;
4005
4006 /*
4007	 * On bad return codes from the wire, just return whatever is already
4008	 * in the buf.
4009 */
4010
4011 /* Make sure a response packet didn't arrive after a bad ACK. */
4012 if (fcb->abuf_valid == 0)
4013 return IEEE1394_RCODE_COMPLETE;
4014
4015 rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12;
4016
4017 if (rcode) {
4018 (*ab->ab_cb)(ab, rcode);
4019 goto cleanup;
4020 }
4021
4022 if ((ab->ab_retlen + pkt->fp_iov[0].iov_len) > ab->ab_length) {
4023 memcpy(((char *)ab->ab_data + ab->ab_retlen),
4024 pkt->fp_iov[0].iov_base, (ab->ab_length - ab->ab_retlen));
4025 ab->ab_retlen = ab->ab_length;
4026 } else {
4027 memcpy(((char *)ab->ab_data + ab->ab_retlen),
4028 pkt->fp_iov[0].iov_base, 4);
4029 ab->ab_retlen += 4;
4030 }
4031 /* Still more, loop and read 4 more bytes. */
4032 if (ab->ab_retlen < ab->ab_length) {
4033 memset(&newpkt, 0, sizeof(newpkt));
4034
4035 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
4036 lo = (ab->ab_addr & 0x00000000ffffffffULL) + ab->ab_retlen;
4037
4038 newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
4039 newpkt.fp_hlen = 12;
4040 newpkt.fp_dlen = 0;
4041 newpkt.fp_hdr[1] =
4042 ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
4043 newpkt.fp_hdr[2] = lo;
4044 newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
4045 (newpkt.fp_tcode << 4);
4046
4047 newpkt.fp_statusarg = fcb;
4048 newpkt.fp_statuscb = fwohci_read_resp;
4049
4050 /*
4051		 * If setting up the next quadlet read fails, just give
4052		 * up and return what has come in so far.
4053 */
4054 rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
4055 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0,
4056 fwohci_read_multi_resp, fcb);
4057 if (rv)
4058 (*ab->ab_cb)(ab, -1);
4059 else {
4060 rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt);
4061 if (rv) {
4062 fwohci_handler_set(sc,
4063 IEEE1394_TCODE_READ_RESP_QUAD,
4064 ab->ab_req->sc1394_node_id, sc->sc_tlabel,
4065 0, NULL, NULL);
4066 (*ab->ab_cb)(ab, -1);
4067 } else {
4068 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
4069 fcb->count++;
4070 return IEEE1394_RCODE_COMPLETE;
4071 }
4072 }
4073 } else
4074 (*ab->ab_cb)(ab, IEEE1394_RCODE_COMPLETE);
4075
4076 cleanup:
4077 /* Can't get here unless ab_cb has been called. */
4078 fcb->abuf_valid = 0;
4079 if (fcb->count == 0)
4080 free(fcb, M_DEVBUF);
4081 return IEEE1394_RCODE_COMPLETE;
4082 }
4083
4084 static int
4085 fwohci_write_ack(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
4086 {
4087 struct ieee1394_abuf *ab = arg;
4088 u_int16_t status;
4089
4090
4091 status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK;
4092 if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
4093 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))
4094 DPRINTF(("Got status packet: 0x%02x\n",
4095 (unsigned int)status));
4096
4097 /* No callback means this level should free the buffers. */
4098 if (ab->ab_cb)
4099 (*ab->ab_cb)(ab, status);
4100 else {
4101 if (ab->ab_data)
4102 free(ab->ab_data, M_1394DATA);
4103 free(ab, M_1394DATA);
4104 }
4105 return IEEE1394_RCODE_COMPLETE;
4106 }
4107
4108 static int
4109 fwohci_inreg(struct ieee1394_abuf *ab, int allow)
4110 {
4111 struct ieee1394_softc *sc = ab->ab_req;
4112 struct fwohci_softc *psc =
4113 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
4114 u_int32_t high, lo;
4115 int rv;
4116
4117 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
4118 lo = (ab->ab_addr & 0x00000000ffffffffULL);
4119
4120 rv = 0;
4121 switch (ab->ab_tcode) {
4122 case IEEE1394_TCODE_READ_REQ_QUAD:
4123 case IEEE1394_TCODE_WRITE_REQ_QUAD:
4124 if (ab->ab_cb)
4125 rv = fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0,
4126 fwohci_parse_input, ab);
4127 else
4128 fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0, NULL,
4129 NULL);
4130 break;
4131 case IEEE1394_TCODE_READ_REQ_BLOCK:
4132 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
4133 if (allow) {
4134 if (ab->ab_cb) {
4135 rv = fwohci_handler_set(psc, ab->ab_tcode,
4136 high, lo, ab->ab_length,
4137 fwohci_parse_input, ab);
4138 if (rv)
4139 fwohci_handler_set(psc, ab->ab_tcode,
4140 high, lo, ab->ab_length, NULL,
4141 NULL);
4142 ab->ab_subok = 1;
4143 } else
4144 fwohci_handler_set(psc, ab->ab_tcode, high, lo,
4145 ab->ab_length, NULL, NULL);
4146 } else {
4147 if (ab->ab_cb)
4148 rv = fwohci_handler_set(psc, ab->ab_tcode, high,
4149 lo, 0, fwohci_parse_input, ab);
4150 else
4151 fwohci_handler_set(psc, ab->ab_tcode, high, lo,
4152 0, NULL, NULL);
4153 }
4154 break;
4155 default:
4156 DPRINTF(("Invalid registration tcode: %d\n", ab->ab_tcode));
4157 return -1;
4158 break;
4159 }
4160 return rv;
4161 }
4162
4163 static int
4164 fwohci_unreg(struct ieee1394_abuf *ab, int allow)
4165 {
4166 void *save;
4167 int rv;
4168
4169 save = ab->ab_cb;
4170 ab->ab_cb = NULL;
4171 rv = fwohci_inreg(ab, allow);
4172 ab->ab_cb = save;
4173 return rv;
4174 }
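/*
 * Illustrative only: registering a one-shot status area with the inreg/
 * unreg pair above, in the spirit of the "status buffer" example in the
 * block comment earlier.  The function names and the fixed address are
 * hypothetical; the abuf fields used are the ones fwohci_parse_input()
 * fills in.
 */
#if 0
static void
example_status_cb(struct ieee1394_abuf *ab, int rcode)
{
	/* A remote node wrote ab->ab_retlen bytes at ab->ab_retaddr. */
	printf("status write (rcode %d): %d bytes at offset 0x%llx\n",
	    rcode, ab->ab_retlen,
	    (unsigned long long)(ab->ab_retaddr - ab->ab_addr));
}

static int
example_register_status(struct ieee1394_softc *sc, struct ieee1394_abuf *ab)
{
	ab->ab_req = sc;
	ab->ab_tcode = IEEE1394_TCODE_WRITE_REQ_BLOCK;
	ab->ab_addr = 0x0000ffffe0000000ULL;	/* example address */
	ab->ab_length = 16;
	ab->ab_data = malloc(ab->ab_length, M_1394DATA, M_WAITOK|M_ZERO);
	ab->ab_cb = example_status_cb;
	return fwohci_inreg(ab, 0);		/* no sub-region access */
}

static void
example_unregister_status(struct ieee1394_abuf *ab)
{
	/* Same abuf and flag value that were used for registration. */
	(void)fwohci_unreg(ab, 0);
	free(ab->ab_data, M_1394DATA);
}
#endif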
4175
4176 static int
4177 fwohci_parse_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
4178 {
4179 struct ieee1394_abuf *ab = (struct ieee1394_abuf *)arg;
4180 u_int64_t addr;
4181 u_int8_t *cur;
4182 int i, count, ret;
4183
4184 ab->ab_tcode = (pkt->fp_hdr[0] >> 4) & 0xf;
4185 ab->ab_tlabel = (pkt->fp_hdr[0] >> 10) & 0x3f;
4186 addr = (((u_int64_t)(pkt->fp_hdr[1] & 0xffff) << 32) | pkt->fp_hdr[2]);
4187
4188 /* Make sure it's always 0 in case this gets reused multiple times. */
4189 ab->ab_retlen = 0;
4190
4191 switch (ab->ab_tcode) {
4192 case IEEE1394_TCODE_READ_REQ_QUAD:
4193 ab->ab_retlen = 4;
4194		/* Responses (if required) will come from the callback code */
4195 ret = -1;
4196 break;
4197 case IEEE1394_TCODE_READ_REQ_BLOCK:
4198 ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff;
4199 if (ab->ab_subok) {
4200 if ((addr + ab->ab_retlen) >
4201 (ab->ab_addr + ab->ab_length))
4202 return IEEE1394_RCODE_ADDRESS_ERROR;
4203 } else
4204 if (ab->ab_retlen != ab->ab_length)
4205 return IEEE1394_RCODE_ADDRESS_ERROR;
4206		/* Responses (if required) will come from the callback code */
4207 ret = -1;
4208 break;
4209 case IEEE1394_TCODE_WRITE_REQ_QUAD:
4210 ab->ab_retlen = 4;
4211 /* Fall through. */
4212
4213 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
4214 if (!ab->ab_retlen)
4215 ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff;
4216 if (ab->ab_subok) {
4217 if ((addr + ab->ab_retlen) >
4218 (ab->ab_addr + ab->ab_length))
4219 return IEEE1394_RCODE_ADDRESS_ERROR;
4220 } else
4221 if (ab->ab_retlen > ab->ab_length)
4222 return IEEE1394_RCODE_ADDRESS_ERROR;
4223
4224 if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD)
4225 ab->ab_data[0] = pkt->fp_hdr[3];
4226 else {
4227 count = 0;
4228 cur = (u_int8_t *)ab->ab_data + (addr - ab->ab_addr);
4229 for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) {
4230 memcpy(cur, pkt->fp_iov[i].iov_base,
4231 pkt->fp_iov[i].iov_len);
4232 cur += pkt->fp_iov[i].iov_len;
4233 count += pkt->fp_iov[i].iov_len;
4234 }
4235 if (ab->ab_retlen != count)
4236 panic("Packet claims %d length "
4237 "but only %d bytes returned\n",
4238 ab->ab_retlen, count);
4239 }
4240 ret = IEEE1394_RCODE_COMPLETE;
4241 break;
4242 default:
4243 panic("Got a callback for a tcode that wasn't requested: %d",
4244 ab->ab_tcode);
4245 break;
4246 }
4247 if (ab->ab_cb) {
4248 ab->ab_retaddr = addr;
4249 ab->ab_cb(ab, IEEE1394_RCODE_COMPLETE);
4250 }
4251 return ret;
4252 }
4253
4254 static int
4255 fwohci_submatch(struct device *parent, struct cfdata *cf, void *aux)
4256 {
4257 struct ieee1394_attach_args *fwa = aux;
4258
4259 /* Both halves must be filled in for a match. */
4260 if ((cf->fwbuscf_idhi == FWBUS_UNK_IDHI &&
4261 cf->fwbuscf_idlo == FWBUS_UNK_IDLO) ||
4262 (cf->fwbuscf_idhi == ntohl(*((u_int32_t *)&fwa->uid[0])) &&
4263 cf->fwbuscf_idlo == ntohl(*((u_int32_t *)&fwa->uid[4]))))
4264 return (config_match(parent, cf, aux));
4265 return 0;
4266 }
4267
4268 int
4269 fwohci_detach(struct fwohci_softc *sc, int flags)
4270 {
4271 int rv = 0;
4272
4273 if (sc->sc_sc1394.sc1394_if != NULL)
4274 rv = config_detach(sc->sc_sc1394.sc1394_if, flags);
4275 if (rv != 0)
4276 return (rv);
4277
4278 callout_stop(&sc->sc_selfid_callout);
4279
4280 if (sc->sc_powerhook != NULL)
4281 powerhook_disestablish(sc->sc_powerhook);
4282 if (sc->sc_shutdownhook != NULL)
4283 shutdownhook_disestablish(sc->sc_shutdownhook);
4284
4285 return (rv);
4286 }
4287
4288 int
4289 fwohci_activate(struct device *self, enum devact act)
4290 {
4291 struct fwohci_softc *sc = (struct fwohci_softc *)self;
4292 int s, rv = 0;
4293
4294 s = splhigh();
4295 switch (act) {
4296 case DVACT_ACTIVATE:
4297 rv = EOPNOTSUPP;
4298 break;
4299
4300 case DVACT_DEACTIVATE:
4301 if (sc->sc_sc1394.sc1394_if != NULL)
4302 rv = config_deactivate(sc->sc_sc1394.sc1394_if);
4303 break;
4304 }
4305 splx(s);
4306
4307 return (rv);
4308 }
4309
4310 #ifdef FW_DEBUG
4311 static void
4312 fwohci_show_intr(struct fwohci_softc *sc, u_int32_t intmask)
4313 {
4314
4315 printf("%s: intmask=0x%08x:", sc->sc_sc1394.sc1394_dev.dv_xname,
4316 intmask);
4317 if (intmask & OHCI_Int_CycleTooLong)
4318 printf(" CycleTooLong");
4319 if (intmask & OHCI_Int_UnrecoverableError)
4320 printf(" UnrecoverableError");
4321 if (intmask & OHCI_Int_CycleInconsistent)
4322 printf(" CycleInconsistent");
4323 if (intmask & OHCI_Int_BusReset)
4324 printf(" BusReset");
4325 if (intmask & OHCI_Int_SelfIDComplete)
4326 printf(" SelfIDComplete");
4327 if (intmask & OHCI_Int_LockRespErr)
4328 printf(" LockRespErr");
4329 if (intmask & OHCI_Int_PostedWriteErr)
4330 printf(" PostedWriteErr");
4331 if (intmask & OHCI_Int_ReqTxComplete)
4332 printf(" ReqTxComplete(0x%04x)",
4333 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST,
4334 OHCI_SUBREG_ContextControlClear));
4335 if (intmask & OHCI_Int_RespTxComplete)
4336 printf(" RespTxComplete(0x%04x)",
4337 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
4338 OHCI_SUBREG_ContextControlClear));
4339 if (intmask & OHCI_Int_ARRS)
4340 printf(" ARRS(0x%04x)",
4341 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
4342 OHCI_SUBREG_ContextControlClear));
4343 if (intmask & OHCI_Int_ARRQ)
4344 printf(" ARRQ(0x%04x)",
4345 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST,
4346 OHCI_SUBREG_ContextControlClear));
4347 if (intmask & OHCI_Int_IsochRx)
4348 printf(" IsochRx(0x%08x)",
4349 OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear));
4350 if (intmask & OHCI_Int_IsochTx)
4351 printf(" IsochTx(0x%08x)",
4352 OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear));
4353 if (intmask & OHCI_Int_RQPkt)
4354 printf(" RQPkt(0x%04x)",
4355 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST,
4356 OHCI_SUBREG_ContextControlClear));
4357 if (intmask & OHCI_Int_RSPkt)
4358 printf(" RSPkt(0x%04x)",
4359 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
4360 OHCI_SUBREG_ContextControlClear));
4361 printf("\n");
4362 }
4363
4364 static void
4365 fwohci_show_phypkt(struct fwohci_softc *sc, u_int32_t val)
4366 {
4367 u_int8_t key, phyid;
4368
4369 key = (val & 0xc0000000) >> 30;
4370 phyid = (val & 0x3f000000) >> 24;
4371 printf("%s: PHY packet from %d: ",
4372 sc->sc_sc1394.sc1394_dev.dv_xname, phyid);
4373 switch (key) {
4374 case 0:
4375 printf("PHY Config:");
4376 if (val & 0x00800000)
4377 printf(" ForceRoot");
4378 if (val & 0x00400000)
4379 printf(" Gap=%x", (val & 0x003f0000) >> 16);
4380 printf("\n");
4381 break;
4382 case 1:
4383 printf("Link-on\n");
4384 break;
4385 case 2:
4386 printf("SelfID:");
4387 if (val & 0x00800000) {
4388 printf(" #%d", (val & 0x00700000) >> 20);
4389 } else {
4390 if (val & 0x00400000)
4391 printf(" LinkActive");
4392 printf(" Gap=%x", (val & 0x003f0000) >> 16);
4393 printf(" Spd=S%d", 100 << ((val & 0x0000c000) >> 14));
4394 if (val & 0x00000800)
4395 printf(" Cont");
4396 if (val & 0x00000002)
4397 printf(" InitiateBusReset");
4398 }
4399 if (val & 0x00000001)
4400 printf(" +");
4401 printf("\n");
4402 break;
4403 default:
4404 printf("unknown: 0x%08x\n", val);
4405 break;
4406 }
4407 }
4408 #endif /* FW_DEBUG */
4409
4410 #if 0
4411 void fwohci_dumpreg(struct ieee1394_softc *, struct fwiso_regdump *);
4412
4413 void
4414 fwohci_dumpreg(struct ieee1394_softc *isc, struct fwiso_regdump *fr)
4415 {
4416 struct fwohci_softc *sc = (struct fwohci_softc *)isc;
4417 #if 0
4418 u_int32_t val;
4419
4420 printf("%s: dump reg\n", isc->sc1394_dev.dv_xname);
4421 printf("\tNodeID reg 0x%08x\n",
4422 OHCI_CSR_READ(sc, OHCI_REG_NodeId));
4423 val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4424 printf("\tIsoCounter 0x%08x, %d %d %d", val,
4425 (val >> 25) & 0xfe, (val >> 12) & 0x1fff, val & 0xfff);
4426 val = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet);
4427 printf(" IntMask 0x%08x, %s\n", val,
4428 val & OHCI_Int_IsochTx ? "isoTx" : "");
4429
4430 val = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet);
4431 printf("\tIT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
4432 OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr),
4433 val,
4434 val & OHCI_CTXCTL_RUN ? " run" : "",
4435 val & OHCI_CTXCTL_WAKE ? " wake" : "",
4436 val & OHCI_CTXCTL_DEAD ? " dead" : "",
4437 val & OHCI_CTXCTL_ACTIVE ? " active" : "");
4438 #endif
4439
4440 fr->fr_nodeid = OHCI_CSR_READ(sc, OHCI_REG_NodeId);
4441 fr->fr_isocounter = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4442 fr->fr_intmask = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet);
4443 fr->fr_it0_commandptr = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr);
4444 fr->fr_it0_contextctrl = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet);
4445
4446
4447 }
4448 #endif
4449
4450
4451 u_int16_t
4452 fwohci_cycletimer(struct fwohci_softc *sc)
4453 {
4454 u_int32_t reg;
4455
4456 reg = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4457
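	/*
	 * IsochronousCycleTimer layout: cycleSeconds in bits 31-25,
	 * cycleCount in bits 24-12, cycleOffset in bits 11-0.  Return
	 * cycleCount with the low bits of cycleSeconds above it (the
	 * FWOHCI_WAIT_DEBUG printouts decode it as value>>13 and
	 * value&0x1fff).
	 */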
4458 return (reg >> 12)&0xffff;
4459 }
4460
4461
4462 u_int16_t
4463 fwohci_it_cycletimer(ieee1394_it_tag_t it)
4464 {
4465 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
4466
4467 return fwohci_cycletimer(itc->itc_sc);
4468 }
4469
4470
4471
4472
4473
4474 /*
4475 * Return value: a positive value is the number of DMA buffer segments;
4476 * a negative value indicates an error. Never zero.
4477 */
4478 static int
4479 fwohci_misc_dmabuf_alloc(bus_dma_tag_t dmat, int dsize, int segno,
4480 bus_dma_segment_t *segp, bus_dmamap_t *dmapp, void **mapp,
4481 const char *xname)
4482 {
4483 int nsegs;
4484 int error;
4485
4486 printf("fwohci_misc_desc_alloc: dsize %d segno %d\n", dsize, segno);
4487
4488 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0,
4489 segp, segno, &nsegs, 0)) != 0) {
4490 printf("%s: unable to allocate descriptor buffer, error = %d\n",
4491 xname, error);
4492 goto fail_0;
4493 }
4494
4495 DPRINTF(("fwohci_misc_desc_alloc: %d segment[s]\n", nsegs));
4496
4497 if ((error = bus_dmamem_map(dmat, segp, nsegs, dsize, (caddr_t *)mapp,
4498 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
4499 printf("%s: unable to map descriptor buffer, error = %d\n",
4500 xname, error);
4501 goto fail_1;
4502 }
4503
4504 DPRINTF(("fwohci_misc_desc_alloc: %s map ok\n", xname));
4505
4506 #ifdef FWOHCI_DEBUG
4507 {
4508 int loop;
4509
4510 for (loop = 0; loop < nsegs; ++loop) {
4511 printf("\t%.2d: 0x%lx - 0x%lx\n", loop,
4512 (long)segp[loop].ds_addr,
4513 (long)segp[loop].ds_addr + segp[loop].ds_len - 1);
4514 }
4515 }
4516 #endif /* FWOHCI_DEBUG */
4517
4518 if ((error = bus_dmamap_create(dmat, dsize, nsegs, dsize,
4519 0, BUS_DMA_WAITOK, dmapp)) != 0) {
4520 printf("%s: unable to create descriptor buffer DMA map, "
4521 "error = %d\n", xname, error);
4522 goto fail_2;
4523 }
4524
4525 DPRINTF(("fwohci_misc_dmabuf_alloc: bus_dmamem_create success\n"));
4526
4527 if ((error = bus_dmamap_load(dmat, *dmapp, *mapp, dsize, NULL,
4528 BUS_DMA_WAITOK)) != 0) {
4529 printf("%s: unable to load descriptor buffer DMA map, "
4530 "error = %d\n", xname, error);
4531 goto fail_3;
4532 }
4533
4534 DPRINTF(("fwohci_it_desc_alloc: bus_dmamem_load success\n"));
4535
4536 return nsegs;
4537
4538 fail_3:
4539 bus_dmamap_destroy(dmat, *dmapp);
4540 fail_2:
4541 bus_dmamem_unmap(dmat, *mapp, dsize);
4542 fail_1:
4543 bus_dmamem_free(dmat, segp, nsegs);
4544 fail_0:
4545	return -error;		/* callers test for a negative return */
4546 }
4547
4548
4549 static void
4550 fwohci_misc_dmabuf_free(bus_dma_tag_t dmat, int dsize, int nsegs,
4551 bus_dma_segment_t *segp, bus_dmamap_t *dmapp, caddr_t map)
4552 {
4553 bus_dmamap_destroy(dmat, *dmapp);
4554 bus_dmamem_unmap(dmat, map, dsize);
4555 bus_dmamem_free(dmat, segp, nsegs);
4556 }
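/*
 * Illustrative only: the typical pairing of the two helpers above, as a
 * sketch.  The local names are hypothetical; the point is that the
 * allocator returns the (positive) segment count on success and a
 * negative value on failure, and that the same dsize/nsegs pair is
 * handed back to the free routine.
 */
#if 0
static void
example_dmabuf_usage(struct fwohci_softc *sc)
{
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	void *kva;
	int nsegs;
	const int dsize = 4096;

	nsegs = fwohci_misc_dmabuf_alloc(sc->sc_dmat, dsize, 1, &seg,
	    &dmamap, &kva, sc->sc_sc1394.sc1394_dev.dv_xname);
	if (nsegs < 0)
		return;				/* allocation failed */

	/* ... use kva for CPU access and dmamap for the device ... */

	fwohci_misc_dmabuf_free(sc->sc_dmat, dsize, nsegs, &seg, &dmamap,
	    (caddr_t)kva);
}
#endif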
4557
4558
4559
4560
4561 /*
4562 * Isochronous receive service
4563 */
4564
4565 /*
4566 * static struct fwohci_ir_ctx *
4567 * fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm,
4568 * int bufnum, int maxsize, int flags)
4569 */
4570 static struct fwohci_ir_ctx *
4571 fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm,
4572 int bufnum, int maxsize, int flags)
4573 {
4574 struct fwohci_ir_ctx *irc;
4575 int i;
4576
4577 printf("fwohci_ir_construct(%s, %d, %d, %x, %d, %d\n",
4578 sc->sc_sc1394.sc1394_dev.dv_xname, no, ch, tagbm, bufnum, maxsize);
4579
4580 if ((irc = malloc(sizeof(*irc), M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
4581 return NULL;
4582 }
4583
4584 irc->irc_sc = sc;
4585
4586 irc->irc_num = no;
4587 irc->irc_status = 0;
4588
4589 irc->irc_channel = ch;
4590 irc->irc_tagbm = tagbm;
4591
4592 irc->irc_desc_num = bufnum;
4593
4594 irc->irc_flags = flags;
4595
4596	/* add space for the 8-byte header (see hdrshim in fwohci_ir_read) */
4597	maxsize += 8;
4598	/* round up to the next power of two (e.g. 492 + 8 -> 512) */
4599	for (i = 32; i < maxsize; i <<= 1);
4600 printf("fwohci_ir_ctx_construct: maxsize %d => %d\n",
4601 maxsize, i);
4602
4603 maxsize = i;
4604
4605 irc->irc_maxsize = maxsize;
4606 irc->irc_buf_totalsize = bufnum * maxsize;
4607
4608 if (fwohci_ir_buf_setup(irc)) {
4609 /* cannot alloc descriptor */
4610 return NULL;
4611 }
4612
4613 irc->irc_readtop = irc->irc_desc_map;
4614 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1;
4615 irc->irc_savedbranch = irc->irc_writeend->fd_branch;
4616 irc->irc_writeend->fd_branch = 0;
4617 /* sync */
4618
4619 if (fwohci_ir_stop(irc) || fwohci_ir_init(irc)) {
4620 return NULL;
4621 }
4622
4623 irc->irc_status |= IRC_STATUS_READY;
4624
4625 return irc;
4626 }
4627
4628
4629
4630 /*
4631 * static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4632 *
4633 * This function releases all DMA buffers and then frees the context itself.
4634 */
4635 static void
4636 fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4637 {
4638 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat, irc->irc_buf_totalsize,
4639 irc->irc_buf_nsegs, irc->irc_buf_segs,
4640 &irc->irc_buf_dmamap, (caddr_t)irc->irc_buf);
4641 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4642 irc->irc_desc_size,
4643 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4644 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map);
4645
4646 free(irc, M_DEVBUF);
4647 }
4648
4649
4650
4651
4652 /*
4653 * static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc)
4654 *
4655 * Allocates descriptors for context DMA dedicated for
4656 * isochronous receive.
4657 *
4658 * This function returns 0 (zero) if it succeeds. Otherwise it
4659 * returns a negative value.
4660 */
4661 static int
4662 fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc)
4663 {
4664 int nsegs;
4665 struct fwohci_desc *fd;
4666 u_int32_t branch;
4667 int bufno = 0; /* DMA segment */
4668 bus_size_t bufused = 0; /* offset in a DMA segment */
4669
4670 irc->irc_desc_size = irc->irc_desc_num * sizeof(struct fwohci_desc);
4671
4672 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat,
4673 irc->irc_desc_size, 1, &irc->irc_desc_seg, &irc->irc_desc_dmamap,
4674 (void **)&irc->irc_desc_map,
4675 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname);
4676
4677 if (nsegs < 0) {
4678 printf("fwohci_ir_buf_alloc: cannot get descriptor\n");
4679 return -1;
4680 }
4681 irc->irc_desc_nsegs = nsegs;
4682
4683 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat,
4684 irc->irc_buf_totalsize, 16, irc->irc_buf_segs,
4685 &irc->irc_buf_dmamap, (void **)&irc->irc_buf,
4686 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname);
4687
4688 if (nsegs < 0) {
4689 printf("fwohci_ir_buf_alloc: cannot get DMA buffer\n");
4690 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4691 irc->irc_desc_size,
4692 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4693 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map);
4694 return -1;
4695 }
4696 irc->irc_buf_nsegs = nsegs;
4697
4698 branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr
4699 + sizeof(struct fwohci_desc);
4700 bufno = 0;
4701 bufused = 0;
4702
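	/*
	 * Build the descriptor ring: one INPUT_LAST descriptor per packet
	 * (buffer-fill mode is cleared in fwohci_ir_init(), so this runs in
	 * packet-per-buffer mode), each branching to the next with Z=1; the
	 * final descriptor is pointed back at the first one below.
	 */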
4703 for (fd = irc->irc_desc_map;
4704 fd < irc->irc_desc_map + irc->irc_desc_num; ++fd) {
4705 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_LAST
4706 | OHCI_DESC_STATUS | OHCI_DESC_BRANCH;
4707 if (irc->irc_flags & IEEE1394_IR_SHORTDELAY) {
4708 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
4709 }
4710 #if 0
4711 if ((fd - irc->irc_desc_map) % 64 == 0) {
4712 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
4713 }
4714 #endif
4715 fd->fd_reqcount = irc->irc_maxsize;
4716 fd->fd_status = fd->fd_rescount = 0;
4717
4718 fd->fd_branch = branch | 0x01;
4719 branch += sizeof(struct fwohci_desc);
4720
4721		/* bus (physical) address of the data buffer for this descriptor */
4722 fd->fd_data =
4723 (u_int32_t)((irc->irc_buf_segs[bufno].ds_addr + bufused));
4724 bufused += irc->irc_maxsize;
4725 if (bufused > irc->irc_buf_segs[bufno].ds_len) {
4726 bufused = 0;
4727 if (++bufno == irc->irc_buf_nsegs) {
4728 /* fail */
4729 printf("fwohci_ir_buf_setup fail\n");
4730
4731 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4732 irc->irc_desc_size,
4733 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4734 &irc->irc_desc_dmamap,
4735 (caddr_t)irc->irc_desc_map);
4736 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4737 irc->irc_buf_totalsize,
4738 irc->irc_buf_nsegs, irc->irc_buf_segs,
4739 &irc->irc_buf_dmamap,
4740 (caddr_t)irc->irc_buf);
4741 return -1;
4742 }
4743 }
4744
4745 #ifdef FWOHCI_DEBUG
4746 if (fd < irc->irc_desc_map + 4
4747 || (fd > irc->irc_desc_map + irc->irc_desc_num - 4)) {
4748 printf("fwohci_ir_buf_setup: desc %d %p buf %08x"
4749 " size %d branch %08x\n",
4750 fd - irc->irc_desc_map, fd, fd->fd_data,
4751 fd->fd_reqcount, fd->fd_branch);
4752 }
4753 #endif /* FWOHCI_DEBUG */
4754 }
4755
4756 --fd;
4757 fd->fd_branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr | 1;
4758 DPRINTF(("fwohci_ir_buf_setup: desc %d %p buf %08x size %d branch %08x\n",
4759 fd - irc->irc_desc_map, fd, fd->fd_data, fd->fd_reqcount,
4760 fd->fd_branch));
4761
4762 return 0;
4763 }
4764
4765
4766
4767 /*
4768 * static int fwohci_ir_init(struct fwohci_ir_ctx *irc)
4769 *
4770 * This function initialises the DMA engine.
4771 */
4772 static int
4773 fwohci_ir_init(struct fwohci_ir_ctx *irc)
4774 {
4775 struct fwohci_softc *sc = irc->irc_sc;
4776 int n = irc->irc_num;
4777 u_int32_t ctxmatch;
4778
4779 ctxmatch = irc->irc_channel & IEEE1394_ISO_CHANNEL_MASK;
4780
4781 if (irc->irc_channel & IEEE1394_ISO_CHANNEL_ANY) {
4782 OHCI_SYNC_RX_DMA_WRITE(sc, n,
4783 OHCI_SUBREG_ContextControlSet,
4784 OHCI_CTXCTL_RX_MULTI_CHAN_MODE);
4785
4786 /* Receive all the isochronous channels */
4787 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet, 0xffffffff);
4788 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet, 0xffffffff);
4789 ctxmatch = 0;
4790 }
4791
4792 ctxmatch |= ((irc->irc_tagbm & 0x0f) << OHCI_CTXMATCH_TAG_BITPOS);
4793 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch, ctxmatch);
4794
4795 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear,
4796 OHCI_CTXCTL_RX_BUFFER_FILL | OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE);
4797 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet,
4798 OHCI_CTXCTL_RX_ISOCH_HEADER);
4799
4800 printf("fwohci_ir_init\n");
4801
4802 return 0;
4803 }
4804
4805
4806 /*
4807 * static int fwohci_ir_start(struct fwohci_ir_ctx *irc)
4808 *
4809 * This function starts the DMA engine.  It must be called after
4810 * fwohci_ir_init() and only while the active bit of the context
4811 * control register is clear; this function does not check that.
4812 */
4813 static int
4814 fwohci_ir_start(struct fwohci_ir_ctx *irc)
4815 {
4816 struct fwohci_softc *sc = irc->irc_sc;
4817 int startidx = irc->irc_readtop - irc->irc_desc_map;
4818 u_int32_t startaddr;
4819
4820 startaddr = irc->irc_desc_dmamap->dm_segs[0].ds_addr
4821 + sizeof(struct fwohci_desc)*startidx;
4822
4823 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num, OHCI_SUBREG_CommandPtr,
4824 startaddr | 1);
4825 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear,
4826 (1 << irc->irc_num));
4827 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num,
4828 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
4829
4830 printf("fwohci_ir_start: CmdPtr %08x Ctx %08x startidx %d\n",
4831 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_CommandPtr),
4832 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_ContextControlSet),
4833 startidx);
4834
4835 irc->irc_status &= ~IRC_STATUS_READY;
4836 irc->irc_status |= IRC_STATUS_RUN;
4837
4838 if ((irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) == 0) {
4839 irc->irc_status |= IRC_STATUS_RECEIVE;
4840 }
4841
4842 return 0;
4843 }
4844
4845
4846
4847 /*
4848 * static int fwohci_ir_stop(struct fwohci_ir_ctx *irc)
4849 *
4850 * This function stops the DMA engine.
4851 */
4852 static int
4853 fwohci_ir_stop(struct fwohci_ir_ctx *irc)
4854 {
4855 struct fwohci_softc *sc = irc->irc_sc;
4856 int i;
4857
4858 printf("fwohci_ir_stop\n");
4859
4860 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num,
4861 OHCI_SUBREG_ContextControlClear,
4862 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD);
4863
4864 i = 0;
4865 while (OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4866 OHCI_SUBREG_ContextControlSet) & OHCI_CTXCTL_ACTIVE) {
4867 #if 0
4868 u_int32_t reg = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4869 OHCI_SUBREG_ContextControlClear);
4870
4871 printf("%s: %d intr IR_CommandPtr 0x%08x "
4872 "ContextCtrl 0x%08x%s%s%s%s\n",
4873 sc->sc_sc1394.sc1394_dev.dv_xname, i,
4874 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4875 OHCI_SUBREG_CommandPtr),
4876 reg,
4877 reg & OHCI_CTXCTL_RUN ? " run" : "",
4878 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
4879 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
4880 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
4881 #endif
4882 if (i > 20) {
4883 printf("fwohci_ir_stop: %s does not stop\n",
4884 sc->sc_sc1394.sc1394_dev.dv_xname);
4885 return 1;
4886 }
4887 DELAY(10);
4888 }
4889
4890 irc->irc_status &= ~IRC_STATUS_RUN;
4891
4892 return 0;
4893 }
4894
4895
4896
4897
4898
4899
4900 static void
4901 fwohci_ir_intr(struct fwohci_softc *sc, struct fwohci_ir_ctx *irc)
4902 {
4903 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
4904 u_int32_t cmd, ctx;
4905 int idx;
4906 struct fwohci_desc *fd;
4907
4908 sc->sc_isocnt.ev_count++;
4909
4910 if (!(irc->irc_status & IRC_STATUS_RUN)) {
4911 printf("fwohci_ir_intr: not running\n");
4912 return;
4913 }
4914
4915 bus_dmamap_sync(sc->sc_dmat, irc->irc_desc_dmamap,
4916 0, irc->irc_desc_size, BUS_DMASYNC_PREREAD);
4917
4918 ctx = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4919 OHCI_SUBREG_ContextControlSet);
4920
4921 cmd = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4922 OHCI_SUBREG_CommandPtr);
4923
4924 #define OHCI_CTXCTL_RUNNING (OHCI_CTXCTL_RUN|OHCI_CTXCTL_ACTIVE)
4925 #define OHCI_CTXCTL_RUNNING_MASK (OHCI_CTXCTL_RUNNING|OHCI_CTXCTL_DEAD)
4926
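	/*
	 * CommandPtr holds the bus address of the current descriptor in its
	 * upper bits (the low bits carry the Z count); convert that back
	 * into an index into irc_desc_map.
	 */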
4927 idx = (cmd & 0xfffffff8) - (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr;
4928 idx /= sizeof(struct fwohci_desc);
4929
4930 if ((ctx & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUNNING) {
4931 if (irc->irc_waitchan != NULL) {
4932 DPRINTF(("fwohci_ir_intr: wakeup "
4933 "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n",
4934 irc->irc_num, cmd, ctx, idx));
4935 #ifdef FWOHCI_WAIT_DEBUG
4936 irc->irc_cycle[1] = fwohci_cycletimer(irc->irc_sc);
4937 #endif
4938 wakeup((void *)irc->irc_waitchan);
4939 }
4940 selwakeup(&irc->irc_sel);
4941 return;
4942 }
4943
4944 fd = irc->irc_desc_map + idx;
4945
4946 printf("fwohci_ir_intr: %s error "
4947 "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n", xname,
4948 irc->irc_num, cmd, ctx, idx);
4949 printf("\tfd flag %x branch %x stat %x rescnt %x total pkt %d\n",
4950 fd->fd_flags, fd->fd_branch, fd->fd_status,fd->fd_rescount,
4951 irc->irc_pktcount);
4952 }
4953
4954
4955
4956
4957 /*
4958 * static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc)
4959 *
4960 * This function returns the number of descriptors that contain data.
4961 */
4962 static int
4963 fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc)
4964 {
4965 struct fwohci_desc *fd = irc->irc_readtop;
4966 int i = 0;
4967
4968 /* XXX SYNC */
4969 while (fd->fd_status != 0) {
4970 if (fd == irc->irc_readtop && i > 0) {
4971 printf("descriptor filled %d at %d\n", i,
4972 irc->irc_pktcount);
4973 #ifdef FWOHCI_WAIT_DEBUG
4974 irc->irc_cycle[2] = fwohci_cycletimer(irc->irc_sc);
4975 printf("cycletimer %d:%d %d:%d %d:%d\n",
4976 irc->irc_cycle[0]>>13, irc->irc_cycle[0]&0x1fff,
4977 irc->irc_cycle[1]>>13, irc->irc_cycle[1]&0x1fff,
4978 irc->irc_cycle[2]>>13, irc->irc_cycle[2]&0x1fff);
4979 #endif
4980
4981 break;
4982 }
4983
4984 ++i;
4985 ++fd;
4986 if (fd == irc->irc_desc_map + irc->irc_desc_num) {
4987 fd = irc->irc_desc_map;
4988 }
4989
4990 }
4991
4992 return i;
4993 }
4994
4995
4996
4997
4998 /*
4999 * int fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag,
5000 * struct uio *uio, int headoffs, int flags)
5001 *
5002 * This function reads data from fwohci's isochronous receive
5003 * buffer.
5004 */
5005 int
5006 fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag, struct uio *uio,
5007 int headoffs, int flags)
5008 {
5009 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5010 int packetnum;
5011 int copylen, hdrshim, fwisohdrsiz;
5012 struct fwohci_desc *fd, *fdprev;
5013 u_int8_t *data;
5014 int status = 0;
5015 u_int32_t tmpbranch;
5016 int pktcount_prev = irc->irc_pktcount;
5017 #ifdef FW_DEBUG
5018 int totalread = 0;
5019 #endif
5020
5021 if (irc->irc_status & IRC_STATUS_READY) {
5022 printf("fwohci_ir_read: starting iso read engine\n");
5023 fwohci_ir_start(irc);
5024 }
5025
5026 packetnum = fwohci_ir_ctx_packetnum(irc);
5027
5028 DPRINTF(("fwohci_ir_read resid %d DMA buf %d\n",
5029 uio->uio_resid, packetnum));
5030
5031 if (packetnum == 0) {
5032 return EAGAIN;
5033 }
5034
5035 #ifdef USEDRAIN
5036 if (packetnum > irc->irc_desc_num - irc->irc_desc_num/4) {
5037 packetnum -= fwohci_ir_ctx_drain(irc);
5038 if (irc->irc_pktcount != 0) {
5039 printf("fwohci_ir_read overrun %d\n",
5040 irc->irc_pktcount);
5041 }
5042 }
5043 #endif /* USEDRAIN */
5044
5045 fd = irc->irc_readtop;
5046
5047 #if 0
5048 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0
5049 && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) {
5050 unsigned int s;
5051 int i = 0;
5052
5053 fdprev = fd;
5054 while (fd->fd_status != 0) {
5055 s = data[14] << 8;
5056 s |= data[15];
5057
5058 if (s != 0x0000ffffu) {
5059 DPRINTF(("find header %x at %d\n",
5060 s, irc->irc_pktcount));
5061 irc->irc_status |= IRC_STATUS_RECEIVE;
5062 break;
5063 }
5064
5065 fd->fd_rescount = 0;
5066 fd->fd_status = 0;
5067
5068 fdprev = fd;
5069 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5070 fd = irc->irc_desc_map;
5071 data = irc->irc_buf;
5072 }
5073 ++i;
5074 }
5075
5076 /* XXX SYNC */
5077 if (i > 0) {
5078 tmpbranch = fdprev->fd_branch;
5079 fdprev->fd_branch = 0;
5080 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5081 irc->irc_writeend = fdprev;
5082 irc->irc_savedbranch = tmpbranch;
5083 }
5084 /* XXX SYNC */
5085
5086 if (fd->fd_status == 0) {
5087 return EAGAIN;
5088 }
5089 }
5090 #endif
5091
5092 hdrshim = 8;
5093 fwisohdrsiz = 0;
5094 data = irc->irc_buf + (fd - irc->irc_desc_map) * irc->irc_maxsize;
5095 if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) {
5096 fwisohdrsiz = sizeof(struct fwiso_header);
5097 }
5098
5099 while (fd->fd_status != 0 &&
5100 (copylen = fd->fd_reqcount - fd->fd_rescount - hdrshim - headoffs)
5101 + fwisohdrsiz < uio->uio_resid) {
5102
5103 DPRINTF(("pkt %04x:%04x uiomove %p, %d\n",
5104 fd->fd_status, fd->fd_rescount,
5105 (void *)(data + 8 + headoffs), copylen));
5106 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0) {
5107 DPRINTF(("[%d]", copylen));
5108 if (irc->irc_pktcount > 1000) {
5109 printf("no header found\n");
5110 status = EIO;
5111 break; /* XXX */
5112 }
5113 } else {
5114 DPRINTF(("<%d>", copylen));
5115 }
5116
5117 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0
5118 && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC
5119 && copylen > 0) {
5120 unsigned int s;
5121
5122 s = data[14] << 8;
5123 s |= data[15];
5124
5125 if (s != 0x0000ffffu) {
5126 DPRINTF(("find header %x at %d\n",
5127 s, irc->irc_pktcount));
5128 irc->irc_status |= IRC_STATUS_RECEIVE;
5129 }
5130 }
5131
5132 if (irc->irc_status & IRC_STATUS_RECEIVE) {
5133 if (copylen > 0) {
5134 if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) {
5135 struct fwiso_header fh;
5136
5137 fh.fh_timestamp = htonl((*(u_int32_t *)data) & 0xffff);
5138 fh.fh_speed = htonl((fd->fd_status >> 5)& 0x00000007);
5139 fh.fh_capture_size = htonl(copylen + 4);
5140 fh.fh_iso_header = htonl(*(u_int32_t *)(data + 4));
5141 status = uiomove((void *)&fh,
5142 sizeof(fh), uio);
5143 if (status != 0) {
5144					/* An error occurred */
5145 printf("uio error in hdr\n");
5146 break;
5147 }
5148 }
5149 status = uiomove((void *)(data + 8 + headoffs),
5150 copylen, uio);
5151 if (status != 0) {
5152				/* An error occurred */
5153 printf("uio error\n");
5154 break;
5155 }
5156 #ifdef FW_DEBUG
5157 totalread += copylen;
5158 #endif
5159 }
5160 }
5161
5162 fd->fd_rescount = 0;
5163 fd->fd_status = 0;
5164
5165 #if 0
5166 /* advance writeend pointer and fill branch */
5167
5168 tmpbranch = fd->fd_branch;
5169 fd->fd_branch = 0;
5170 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5171 irc->irc_writeend = fd;
5172 irc->irc_savedbranch = tmpbranch;
5173 #endif
5174 fdprev = fd;
5175
5176 data += irc->irc_maxsize;
5177 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5178 fd = irc->irc_desc_map;
5179 data = irc->irc_buf;
5180 }
5181 ++irc->irc_pktcount;
5182 }
5183
5184 #if 1
5185 if (irc->irc_pktcount != pktcount_prev) {
5186 /* XXX SYNC */
5187 tmpbranch = fdprev->fd_branch;
5188 fdprev->fd_branch = 0;
5189 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5190 irc->irc_writeend = fdprev;
5191 irc->irc_savedbranch = tmpbranch;
5192 /* XXX SYNC */
5193 }
5194 #endif
5195
5196 if (!(OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num,
5197 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)) {
5198 /* do wake */
5199 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5200 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
5201 }
5202
5203 if (packetnum > irc->irc_maxqueuelen) {
5204 irc->irc_maxqueuelen = packetnum;
5205 irc->irc_maxqueuepos = irc->irc_pktcount;
5206 }
5207
5208 if (irc->irc_pktcount == pktcount_prev) {
5209 #if 0
5210 printf("fwohci_ir_read: process 0 packet, total %d\n",
5211 irc->irc_pktcount);
5212 if (++pktfail > 30) {
5213 return 0;
5214 }
5215 #endif
5216 return EAGAIN;
5217 }
5218
5219 irc->irc_readtop = fd;
5220
5221 DPRINTF(("fwochi_ir_read: process %d packet, total %d\n",
5222 totalread, irc->irc_pktcount));
5223
5224 return status;
5225 }
5226
5227
5228
5229
5230 /*
5231 * int fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag,
5232 * void *wchan, char *name)
5233 *
5234 * This function waits until new data arrives.
5235 */
5236 int
5237 fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag, void *wchan, char *name)
5238 {
5239 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5240 struct fwohci_desc *fd;
5241 int pktnum;
5242 int stat;
5243
5244 if ((pktnum = fwohci_ir_ctx_packetnum(irc)) > 4) {
5245 DPRINTF(("fwohci_ir_wait enough data %d\n", pktnum));
5246 return 0;
5247 }
5248
5249 fd = irc->irc_readtop + 32;
5250 if (fd >= irc->irc_desc_map + irc->irc_desc_num) {
5251 fd -= irc->irc_desc_num;
5252 }
5253
5254 irc->irc_waitchan = wchan;
5255 if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) {
5256 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
5257 DPRINTF(("fwohci_ir_wait stops %d set intr %d\n",
5258 irc->irc_readtop - irc->irc_desc_map,
5259 fd - irc->irc_desc_map));
5260 /* XXX SYNC */
5261 }
5262
5263 #ifdef FWOHCI_WAIT_DEBUG
5264 irc->irc_cycle[0] = fwohci_cycletimer(irc->irc_sc);
5265 #endif
5266
5267 irc->irc_status |= IRC_STATUS_SLEEPING;
5268 if ((stat = tsleep(wchan, PCATCH|PRIBIO, name, hz*10)) != 0) {
5269 irc->irc_waitchan = NULL;
5270 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
5271 if (stat == EWOULDBLOCK) {
5272 printf("fwohci_ir_wait: timeout\n");
5273 return EIO;
5274 } else {
5275 return EINTR;
5276 }
5277 }
5278
5279 irc->irc_waitchan = NULL;
5280 if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) {
5281 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
5282 /* XXX SYNC */
5283 }
5284
5285 DPRINTF(("fwohci_ir_wait: wakeup\n"));
5286
5287 return 0;
5288 }
5289
5290
5291
5292
5293 /*
5294 * int fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag,
5295 * struct proc *p)
5296 *
5297 * This function returns the number of packets in the queue.
5298 */
5299 int
5300 fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag, struct proc *p)
5301 {
5302 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5303 int pktnum;
5304
5305 if (irc->irc_status & IRC_STATUS_READY) {
5306 printf("fwohci_ir_select: starting iso read engine\n");
5307 fwohci_ir_start(irc);
5308 }
5309
5310 if ((pktnum = fwohci_ir_ctx_packetnum(irc)) == 0) {
5311 selrecord(p, &irc->irc_sel);
5312 }
5313
5314 return pktnum;
5315 }
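/*
 * Illustrative only: how a consumer (the fwiso layer, for instance) might
 * combine the isochronous receive entry points above.  The parameter
 * names are the caller's; "fwisord" is just an example wmesg.
 */
#if 0
static int
example_ir_consume(struct device *dev, ieee1394_ir_tag_t tag,
    struct uio *uio, void *wchan)
{
	int error;

	for (;;) {
		error = fwohci_ir_read(dev, tag, uio, 0, 0);
		if (error != EAGAIN)
			return error;	/* 0 on success, else a hard error */
		/* Nothing buffered yet; block until more packets arrive. */
		error = fwohci_ir_wait(dev, tag, wchan, "fwisord");
		if (error)
			return error;
	}
}
#endif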
5316
5317
5318
5319 #ifdef USEDRAIN
5320 /*
5321 * int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc)
5322 *
5323 * This function drains packets from the receive DMA buffer (all of
5324 * them if the DMA engine has stopped, otherwise up to half the ring).
5325 */
5326 static int
5327 fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc)
5328 {
5329 struct fwohci_desc *fd = irc->irc_readtop;
5330 u_int32_t reg;
5331 int count = 0;
5332
5333 reg = OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num,
5334 OHCI_SUBREG_ContextControlClear);
5335
5336 printf("fwohci_ir_ctx_drain ctx%s%s%s%s\n",
5337 reg & OHCI_CTXCTL_RUN ? " run" : "",
5338 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5339 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5340 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5341
5342 if ((reg & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUN) {
5343 /* DMA engine is stopped */
5344 u_int32_t startadr;
5345
5346 for (fd = irc->irc_desc_map;
5347 fd < irc->irc_desc_map + irc->irc_desc_num;
5348 ++fd) {
5349 fd->fd_status = 0;
5350 }
5351
5352 /* Restore branch addr of the last descriptor */
5353 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5354
5355 irc->irc_readtop = irc->irc_desc_map;
5356 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1;
5357 irc->irc_savedbranch = irc->irc_writeend->fd_branch;
5358 irc->irc_writeend->fd_branch = 0;
5359
5360 count = irc->irc_desc_num;
5361
5362 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5363 OHCI_SUBREG_ContextControlClear,
5364 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD);
5365
5366 startadr = (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr;
5367
5368 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count);
5369
5370 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5371 OHCI_SUBREG_CommandPtr, startadr | 1);
5372
5373 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5374 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
5375 } else {
5376 const int removecount = irc->irc_desc_num/2;
5377 u_int32_t tmpbranch;
5378
5379 for (count = 0; count < removecount; ++count) {
5380 if (fd->fd_status == 0) {
5381 break;
5382 }
5383
5384 fd->fd_status = 0;
5385
5386 tmpbranch = fd->fd_branch;
5387 fd->fd_branch = 0;
5388 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5389 irc->irc_writeend = fd;
5390 irc->irc_savedbranch = tmpbranch;
5391
5392 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5393 fd = irc->irc_desc_map;
5394 }
5396 }
5397
5398 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count);
5399 }
5400
5401 return count;
5402 }
5403 #endif /* USEDRAIN */
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413 /*
5414 * service routines for isochronous transmit
5415 */
5416
5417
5418 struct fwohci_it_ctx *
5419 fwohci_it_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tag, int maxsize)
5420 {
5421 struct fwohci_it_ctx *itc;
5422 size_t dmastrsize;
5423 struct fwohci_it_dmabuf *dmastr;
5424 struct fwohci_desc *desc;
5425 bus_addr_t descphys;
5426 int nodesc;
5427 int i, j;
5428
5429 if ((itc = malloc(sizeof(*itc), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) {
5430 return itc;
5431 }
5432
5433 itc->itc_num = no;
5434 itc->itc_flags = 0;
5435 itc->itc_sc = sc;
5436 itc->itc_bufnum = FWOHCI_IT_BUFNUM;
5437
5438 itc->itc_channel = ch;
5439 itc->itc_tag = tag;
5440 itc->itc_speed = OHCI_CTXCTL_SPD_100; /* XXX */
5441
5442 itc->itc_outpkt = 0;
5443
5444 itc->itc_maxsize = maxsize;
5445
5446 dmastrsize = sizeof(struct fwohci_it_dmabuf)*itc->itc_bufnum;
5447
5448 if ((dmastr = malloc(dmastrsize, M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) {
5449 goto error_1;
5450 }
5451 itc->itc_buf = dmastr;
5452
5453 /*
5454	 * Get memory for descriptors.  Each buffer holds 256 packet
5455	 * entries (3 descriptors per packet) plus 1 trailing descriptor
5456	 * that writes to a 4-byte scratch area.
5457 */
5458 itc->itc_descsize = (256*3 + 1)*itc->itc_bufnum;
5459
5460 if (fwohci_it_desc_alloc(itc)) {
5461 printf("%s: cannot get enough memory for descriptor\n",
5462 sc->sc_sc1394.sc1394_dev.dv_xname);
5463 goto error_2;
5464 }
5465
5466 /* prepare DMA buffer */
5467 nodesc = itc->itc_descsize/itc->itc_bufnum;
5468 desc = (struct fwohci_desc *)itc->itc_descmap;
5469 descphys = itc->itc_dseg.ds_addr;
5470
5471 for (i = 0; i < itc->itc_bufnum; ++i) {
5472
5473 if (fwohci_itd_construct(itc, &dmastr[i], i, desc,
5474 descphys, nodesc,
5475 itc->itc_maxsize, itc->itc_scratch_paddr)) {
5476 goto error_3;
5477 }
5478 desc += nodesc;
5479 descphys += sizeof(struct fwohci_desc)*nodesc;
5480 }
5481
5482 #if 1
5483 itc->itc_buf_start = itc->itc_buf;
5484 itc->itc_buf_end = itc->itc_buf;
5485 itc->itc_buf_linkend = itc->itc_buf;
5486 #else
5487 itc->itc_bufidx_start = 0;
5488 itc->itc_bufidx_end = 0;
5489 itc->itc_bufidx_linkend = 0;
5490 #endif
5491 itc->itc_buf_cnt = 0;
5492 itc->itc_waitchan = NULL;
5493 *itc->itc_scratch = 0xffffffff;
5494
5495 return itc;
5496
5497 error_3:
5498 for (j = 0; j < i; ++j) {
5499 fwohci_itd_destruct(&dmastr[j]);
5500 }
5501 fwohci_it_desc_free(itc);
5502 error_2:
5503 free(itc->itc_buf, M_DEVBUF);
5504 error_1:
5505 free(itc, M_DEVBUF);
5506
5507 return NULL;
5508 }
5509
5510
5511
5512 void
5513 fwohci_it_ctx_destruct(struct fwohci_it_ctx *itc)
5514 {
5515 int i;
5516
5517 for (i = 0; i < itc->itc_bufnum; ++i) {
5518 fwohci_itd_destruct(&itc->itc_buf[i]);
5519 }
5520
5521 fwohci_it_desc_free(itc);
5522 free(itc, M_DEVBUF);
5523 }
5524
5525
5526 /*
5527 * static int fwohci_it_desc_alloc(struct fwohci_it_ctx *itc)
5528 *
5529 * Allocates descriptors for context DMA dedicated for
5530 * isochronous transmit.
5531 *
5532 * This function returns 0 (zero) if it succeeds. Otherwise it
5533 * returns a non-zero error code.
5534 */
5535 static int
5536 fwohci_it_desc_alloc(struct fwohci_it_ctx *itc)
5537 {
5538 bus_dma_tag_t dmat = itc->itc_sc->sc_dmat;
5539 const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
5540 int error, dsize;
5541
5542 /* add for scratch */
5543 itc->itc_descsize++;
5544
5545 /* rounding up to 256 */
5546 if ((itc->itc_descsize & 0x0ff) != 0) {
5547 itc->itc_descsize =
5548 (itc->itc_descsize & ~0x0ff) + 0x100;
5549 }
5550 /* remove for scratch */
5551
5552 itc->itc_descsize--;
5553 printf("%s: fwohci_it_desc_alloc will allocate %d descs\n",
5554 xname, itc->itc_descsize);
5555
5556 /*
5557 * allocate descriptor buffer
5558 */
5559 dsize = sizeof(struct fwohci_desc) * itc->itc_descsize;
5560
5561 printf("%s: fwohci_it_desc_alloc: descriptor %d, dsize %d\n",
5562 xname, itc->itc_descsize, dsize);
5563
5564 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0,
5565 &itc->itc_dseg, 1, &itc->itc_dnsegs, 0)) != 0) {
5566 printf("%s: unable to allocate descriptor buffer, error = %d\n",
5567 xname, error);
5568 goto fail_0;
5569 }
5570
5571 printf("fwohci_it_desc_alloc: %d segment[s]\n", itc->itc_dnsegs);
5572
5573 if ((error = bus_dmamem_map(dmat, &itc->itc_dseg,
5574 itc->itc_dnsegs, dsize, (caddr_t *)&itc->itc_descmap,
5575 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
5576 printf("%s: unable to map descriptor buffer, error = %d\n",
5577 xname, error);
5578 goto fail_1;
5579 }
5580
5581 printf("fwohci_it_desc_alloc: bus_dmamem_map success dseg %lx:%lx\n",
5582 (long)itc->itc_dseg.ds_addr, (long)itc->itc_dseg.ds_len);
5583
5584 if ((error = bus_dmamap_create(dmat, dsize, itc->itc_dnsegs,
5585 dsize, 0, BUS_DMA_WAITOK, &itc->itc_ddmamap)) != 0) {
5586 printf("%s: unable to create descriptor buffer DMA map, "
5587 "error = %d\n", xname, error);
5588 goto fail_2;
5589 }
5590
5591 printf("fwohci_it_desc_alloc: bus_dmamem_create success\n");
5592
5593 {
5594 int loop;
5595
5596 for (loop = 0; loop < itc->itc_ddmamap->dm_nsegs; ++loop) {
5597 printf("\t%.2d: 0x%lx - 0x%lx\n", loop,
5598 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr,
5599 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr +
5600 (long)itc->itc_ddmamap->dm_segs[loop].ds_len - 1);
5601 }
5602 }
5603
5604 if ((error = bus_dmamap_load(dmat, itc->itc_ddmamap,
5605 itc->itc_descmap, dsize, NULL, BUS_DMA_WAITOK)) != 0) {
5606 printf("%s: unable to load descriptor buffer DMA map, "
5607 "error = %d\n", xname, error);
5608 goto fail_3;
5609 }
5610
5611 printf("%s: fwohci_it_desc_alloc: get DMA memory phys:0x%08x vm:%p\n",
5612 xname, (int)itc->itc_ddmamap->dm_segs[0].ds_addr, itc->itc_descmap);
5613
5614 itc->itc_scratch = (u_int32_t *)(itc->itc_descmap
5615 + (sizeof(struct fwohci_desc))*itc->itc_descsize);
5616 itc->itc_scratch_paddr =
5617 itc->itc_ddmamap->dm_segs[0].ds_addr
5618 + (sizeof(struct fwohci_desc))*itc->itc_descsize;
5619
5620 printf("%s: scratch %p, 0x%x\n", xname, itc->itc_scratch,
5621 (int)itc->itc_scratch_paddr);
5622
5623 /* itc->itc_scratch_paddr = vtophys(itc->itc_scratch); */
5624
5625 return 0;
5626
5627 fail_3:
5628 bus_dmamap_destroy(dmat, itc->itc_ddmamap);
5629 fail_2:
5630 bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize);
5631 fail_1:
5632 bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs);
5633 fail_0:
5634 itc->itc_dnsegs = 0;
5635 itc->itc_descmap = NULL;
5636 return error;
5637 }
5638
5639
5640 static void
5641 fwohci_it_desc_free(struct fwohci_it_ctx *itc)
5642 {
5643 bus_dma_tag_t dmat = itc->itc_sc->sc_dmat;
5644 int dsize = sizeof(struct fwohci_desc) * itc->itc_descsize + 4;
5645
5646 bus_dmamap_destroy(dmat, itc->itc_ddmamap);
5647 bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize);
5648 bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs);
5649
5650 itc->itc_dnsegs = 0;
5651 itc->itc_descmap = NULL;
5652 }
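
/*
 * The DMA buffers of an IT context form a ring which the routines
 * below walk with INC_BUF().  The macro itself is defined elsewhere in
 * this driver; a minimal equivalent sketch, assuming itc_buf[] holds
 * itc_bufnum entries, would be:
 */
#if 0
#define INC_BUF(itc, itd)						\
	do {								\
		if (++(itd) >= &(itc)->itc_buf[(itc)->itc_bufnum])	\
			(itd) = &(itc)->itc_buf[0];			\
	} while (/*CONSTCOND*/ 0)
#endif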
5653
5654
5655
5656 /*
5657 * int fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
5658 * struct ieee1394_it_datalist *itdata, int flags)
5659 *
5660 * This function writes packet data into the DMA buffers of the
5661 * context.  It parses the ieee1394_it_datalist commands and
5662 * fills the DMA buffers.  The return value is the number of
5663 * packets written, or a negative error code when something
5664 * goes wrong.
5665 *
5666 * When this function returns a positive value smaller than
5667 * ndata, it has run out of DMA buffer space.
5668 */
5669 int
5670 fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
5671 struct ieee1394_it_datalist *itdata, int flags)
5672 {
5673 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
5674 int rv;
5675 int writepkt = 0;
5676 struct fwohci_it_dmabuf *itd;
5677 int i = 0;
5678
5679 itd = itc->itc_buf_end;
5680
5681 while (ndata > 0) {
5682 int s;
5683
5684 if (fwohci_itd_isfull(itd) || fwohci_itd_islocked(itd)) {
5685 if (itc->itc_buf_cnt == itc->itc_bufnum) {
5686 /* no space to write */
5687 printf("sleeping: start linkend end %d %d %d "
5688 "bufcnt %d\n",
5689 itc->itc_buf_start->itd_num,
5690 itc->itc_buf_linkend->itd_num,
5691 itc->itc_buf_end->itd_num,
5692 itc->itc_buf_cnt);
5693
5694 itc->itc_waitchan = itc;
5695 if (tsleep((void *)itc->itc_waitchan,
5696 PCATCH, "fwohci it", 0) == EWOULDBLOCK) {
5697 itc->itc_waitchan = NULL;
5698 printf("fwohci0 signal\n");
5699 break;
5700 }
5701 printf("waking: start linkend end %d %d %d\n",
5702 itc->itc_buf_start->itd_num,
5703 itc->itc_buf_linkend->itd_num,
5704 itc->itc_buf_end->itd_num);
5705
5706 itc->itc_waitchan = itc;
5707 i = 0;
5708 } else {
5709 /*
5710 * Use next buffer. This DMA buffer is full
5711 * or locked.
5712 */
5713 INC_BUF(itc, itd);
5714 }
5715 }
5716
5717 if (++i > 10) {
5718 panic("why loop so much %d", itc->itc_buf_cnt);
5719 break;
5720 }
5721
5722 s = splbio();
5723
5724 if (fwohci_itd_hasdata(itd) == 0) {
5725 ++itc->itc_buf_cnt;
5726 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
5727 }
5728
5729 rv = fwohci_itd_writedata(itd, ndata, itdata);
5730 DPRINTF(("fwohci_it_ctx_writedata: buf %d ndata %d rv %d\n",
5731 itd->itd_num, ndata, rv));
5732
5733 if (itc->itc_buf_start == itc->itc_buf_linkend
5734 && (itc->itc_flags & ITC_FLAGS_RUN) != 0) {
5735
5736 #ifdef DEBUG_USERADD
5737 printf("fwohci_it_ctx_writedata: emergency!\n");
5738 #endif
5739 if (itc->itc_buf_linkend != itc->itc_buf_end
5740 && fwohci_itd_hasdata(itc->itc_buf_end)) {
5741 struct fwohci_it_dmabuf *itdn = itc->itc_buf_linkend;
5742
5743 INC_BUF(itc, itdn);
5744 printf("connecting %d after %d\n",
5745 itdn->itd_num,
5746 itc->itc_buf_linkend->itd_num);
5747 if (fwohci_itd_link(itc->itc_buf_linkend, itdn)) {
5748 printf("fwohci_it_ctx_writedata:"
5749 " cannot link correctly\n");
5750 return -1;
5751 }
5752 itc->itc_buf_linkend = itdn;
5753 }
5754 }
5755
5756 splx(s);
5757
5758 if (rv < 0) {
5759 /* some error happened */
5760 break;
5761 }
5762
5763 writepkt += rv;
5764 ndata -= rv;
5765 itdata += rv;
5766 itc->itc_buf_end = itd;
5767 }
5768
5769 /* Start DMA engine if stopped */
5770 if ((itc->itc_flags & ITC_FLAGS_RUN) == 0) {
5771 if (itc->itc_buf_cnt > itc->itc_bufnum - 1 || flags) {
5772 /* run */
5773 printf("fwohci_it_ctx_writedata: DMA engine start\n");
5774 fwohci_it_ctx_run(itc);
5775 }
5776 }
5777
5778 return writepkt;
5779 }
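
/*
 * Illustrative caller sketch (disabled; not part of the driver): how a
 * writer such as the fwiso layer might queue one 488-byte DV-style
 * packet (8-byte CIP header plus 480 bytes of payload, matching the
 * 488-byte check in fwohci_itd_writedata() below) through this
 * interface.  The function and variable names and the exact encoding
 * of it_cmd[] (command code OR'ed with a byte count in the
 * IEEE1394_IT_CMD_SIZE field) are assumptions made for the example.
 */
#if 0
static int
example_queue_packet(ieee1394_it_tag_t it, u_int32_t ciphdr[2],
    u_int8_t *payload)
{
	struct ieee1394_it_datalist dl;
	int rv;

	dl.it_cmd[0] = IEEE1394_IT_CMD_IMMED;	/* 8-byte immediate header */
	memcpy(&dl.it_u[0].id_data, ciphdr, 8);
	dl.it_cmd[1] = IEEE1394_IT_CMD_PTR | (480 & IEEE1394_IT_CMD_SIZE);
	dl.it_u[1].id_addr = payload;		/* 480 bytes of payload */
	dl.it_cmd[2] = IEEE1394_IT_CMD_NOP;
	dl.it_cmd[3] = IEEE1394_IT_CMD_NOP;

	rv = fwohci_it_ctx_writedata(it, 1, &dl, 0);
	if (rv != 1)
		return -1;	/* error, or no room in the DMA buffers */

	/* start the DMA engine even if the buffers are not yet full */
	fwohci_it_ctx_flush(it);
	return 0;
}
#endif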
5780
5781
5782
5783 static void
5784 fwohci_it_ctx_run(struct fwohci_it_ctx *itc)
5785 {
5786 struct fwohci_softc *sc = itc->itc_sc;
5787 int ctx = itc->itc_num;
5788 struct fwohci_it_dmabuf *itd
5789 = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
5790 u_int32_t reg;
5791 int i;
5792
5793 if (itc->itc_flags & ITC_FLAGS_RUN) {
5794 return;
5795 }
5796 itc->itc_flags |= ITC_FLAGS_RUN;
5797
5798 /*
5799 * Dirty, but we cannot think of a better place to save the branch
5800 * address of the top DMA buffer and substitute 0 for it.
5801 */
5802 itd->itd_savedbranch = itd->itd_lastdesc->fd_branch;
5803 itd->itd_lastdesc->fd_branch = 0;
5804
5805 if (itc->itc_buf_cnt > 1) {
5806 struct fwohci_it_dmabuf *itdn = itd;
5807
5808 #if 0
5809 INC_BUF(itc, itdn);
5810
5811 if (fwohci_itd_link(itd, itdn)) {
5812 printf("fwohci_it_ctx_run: cannot link correctly\n");
5813 return;
5814 }
5815 itc->itc_buf_linkend = itdn;
5816 #else
5817 for (;;) {
5818 INC_BUF(itc, itdn);
5819
5820 if (itdn == itc->itc_buf_end) {
5821 break;
5822 }
5823 if (fwohci_itd_link(itd, itdn)) {
5824 printf("fwohci_it_ctx_run: cannot link\n");
5825 return;
5826 }
5827 itd = itdn;
5828 }
5829 itc->itc_buf_linkend = itd;
5830 #endif
5831 } else {
5832 itd->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
5833 itc->itc_buf_linkend = itc->itc_buf_end;
5834 itc->itc_buf_end->itd_flags |= ITD_FLAGS_LOCK;
5835
5836 /* sanity check */
5837 if (itc->itc_buf_end != itc->itc_buf_start) {
5838 printf("buf start & end differs %p %p\n",
5839 itc->itc_buf_end, itc->itc_buf_start);
5840 }
5841 #if 0
5842 {
5843 u_int32_t *fdp;
5844 u_int32_t adr;
5845 int i;
5846
5847 printf("fwohci_it_ctx_run: itc_buf_cnt 1, DMA buf %d\n",
5848 itd->itd_num);
5849 printf(" last desc %p npacket %d, %d 0x%04x%04x",
5850 itd->itd_lastdesc, itd->itd_npacket,
5851 (itd->itd_lastdesc - itd->itd_desc)/3,
5852 itd->itd_lastdesc->fd_flags,
5853 itd->itd_lastdesc->fd_reqcount);
5854 fdp = (u_int32_t *)itd->itd_desc;
5855 adr = (u_int32_t)itd->itd_desc_phys; /* XXX */
5856
5857 for (i = 0; i < 7*4; ++i) {
5858 if (i % 4 == 0) {
5859 printf("\n%x:", adr + 4*i);
5860 }
5861 printf(" %08x", fdp[i]);
5862 }
5863
5864 if (itd->itd_npacket > 4) {
5865 printf("\n...");
5866 i = (itd->itd_npacket - 2)*12 + 4;
5867 } else {
5868 i = 2*12 + 4;
5869 }
5870 for (;i < itd->itd_npacket*12 + 4; ++i) {
5871 if (i % 4 == 0) {
5872 printf("\n%x:", adr + 4*i);
5873 }
5874 printf(" %08x", fdp[i]);
5875 }
5876 printf("\n");
5877 }
5878 #endif
5879 }
5880 {
5881 struct fwohci_desc *fd;
5882
5883 printf("fwohci_it_ctx_run: link start linkend end %d %d %d\n",
5884 itc->itc_buf_start->itd_num,
5885 itc->itc_buf_linkend->itd_num,
5886 itc->itc_buf_end->itd_num);
5887
5888 fd = itc->itc_buf_start->itd_desc;
5889 if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
5890 printf("fwohci_it_ctx_run: start buf not with STORE\n");
5891 }
5892 fd += 3;
5893 if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
5894 printf("fwohci_it_ctx_run: start buf does not have intr\n");
5895 }
5896
5897 fd = itc->itc_buf_linkend->itd_desc;
5898 if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
5899 printf("fwohci_it_ctx_run: linkend buf not with STORE\n");
5900 }
5901 fd += 3;
5902 if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
5903 printf("fwohci_it_ctx_run: linkend buf does not have intr\n");
5904 }
5905 }
5906
5907 *itc->itc_scratch = 0xffffffff;
5908
5909 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear,
5910 0xffff0000);
5911 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
5912
5913 printf("fwohci_it_ctx_run start for ctx %d\n", ctx);
5914 printf("%s: bfr IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
5915 sc->sc_sc1394.sc1394_dev.dv_xname,
5916 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
5917 reg,
5918 reg & OHCI_CTXCTL_RUN ? " run" : "",
5919 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5920 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5921 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5922
5923 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear,
5924 OHCI_CTXCTL_RUN);
5925
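	/*
	 * Wait for the context to go inactive; CommandPtr must not be
	 * written while the context is still running or active.
	 */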
5926 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
5927 i = 0;
5928 while (reg & (OHCI_CTXCTL_ACTIVE | OHCI_CTXCTL_RUN)) {
5929 delay(100);
5930 if (++i > 1000) {
5931 printf("%s: cannot stop iso transmit engine\n",
5932 sc->sc_sc1394.sc1394_dev.dv_xname);
5933 break;
5934 }
5935 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx,
5936 OHCI_SUBREG_ContextControlSet);
5937 }
5938
5939 printf("%s: itm IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
5940 sc->sc_sc1394.sc1394_dev.dv_xname,
5941 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
5942 reg,
5943 reg & OHCI_CTXCTL_RUN ? " run" : "",
5944 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5945 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5946 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5947
5948 printf("%s: writing CommandPtr to 0x%08x\n",
5949 sc->sc_sc1394.sc1394_dev.dv_xname,
5950 (int)itc->itc_buf_start->itd_desc_phys);
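	/*
	 * The low four bits of CommandPtr carry the Z value, the number
	 * of 16-byte descriptors in the first descriptor block: here 4,
	 * for the STORE_VALUE, OUTPUT_IMMEDIATE (two slots) and
	 * OUTPUT_LAST descriptors built in fwohci_itd_construct().
	 */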
5951 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_CommandPtr,
5952 fwohci_itd_list_head(itc->itc_buf_start) | 4);
5953
5954 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlSet,
5955 OHCI_CTXCTL_RUN | OHCI_CTXCTL_WAKE);
5956
5957 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
5958
5959 printf("%s: aft IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
5960 sc->sc_sc1394.sc1394_dev.dv_xname,
5961 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
5962 reg,
5963 reg & OHCI_CTXCTL_RUN ? " run" : "",
5964 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5965 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5966 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5967 }
5968
5969
5970
5971 int
5972 fwohci_it_ctx_flush(ieee1394_it_tag_t it)
5973 {
5974 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
5975 int rv = 0;
5976
5977 if ((itc->itc_flags & ITC_FLAGS_RUN) == 0
5978 && itc->itc_buf_cnt > 0) {
5979 printf("fwohci_it_ctx_flush: %s flushing\n",
5980 itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname);
5981
5982 fwohci_it_ctx_run(itc);
5983 rv = 1;
5984 }
5985
5986 return rv;
5987 }
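
/*
 * Usage note: fwohci_it_ctx_writedata() only starts the DMA engine by
 * itself once nearly all buffers are filled or its "flags" argument is
 * non-zero, so a writer such as the fwiso layer is expected to call
 * fwohci_it_ctx_flush() after queueing its final packets to get a
 * partially filled set of buffers transmitted.
 */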
5988
5989
5990 /*
5991 * static void fwohci_it_intr(struct fwohci_softc *sc,
5992 *		struct fwohci_it_ctx *itc)
5993 *
5994 * This function is the interrupt handler for the isochronous
5995 * transmit interrupt.  It will 1) unlink used (already
5996 * transmitted) buffers, 2) link newly filled buffers if
5997 * necessary, and 3) notify fwiso_write() that free DMA
5998 * buffers are available.
5999 */
6000 static void
6001 fwohci_it_intr(struct fwohci_softc *sc, struct fwohci_it_ctx *itc)
6002 {
6003 struct fwohci_it_dmabuf *itd, *newstartbuf;
6004 u_int16_t scratchval;
6005 u_int32_t reg;
6006
6007 reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
6008 OHCI_SUBREG_ContextControlSet);
6009
6010 /* print out debug info */
6011 #ifdef FW_DEBUG
6012 printf("fwohci_it_intr: CTX %d\n", itc->itc_num);
6013
6014 printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x "
6015 "ContextCtrl 0x%08x%s%s%s%s\n",
6016 sc->sc_sc1394.sc1394_dev.dv_xname,
6017 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr),
6018 reg,
6019 reg & OHCI_CTXCTL_RUN ? " run" : "",
6020 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
6021 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
6022 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
6023 printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n",
6024 sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch,
6025 itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num,
6026 itc->itc_buf_cnt);
6027 {
6028 u_int32_t reg
6029 = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
6030 printf("\t\tIsoCounter 0x%08x, %d %d %d\n", reg,
6031 (reg >> 25) & 0xfe, (reg >> 12) & 0x1fff, reg & 0xfff);
6032 }
6033 #endif /* FW_DEBUG */
6034 /* end print out debug info */
6035
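	/*
	 * The scratch word holds the number of the DMA buffer whose
	 * STORE_VALUE descriptor was executed most recently; 0xffff
	 * (the reset value written back below) means the engine has not
	 * entered a new buffer since the last interrupt, i.e. it has
	 * run out of queued data.
	 */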
6036 scratchval = (*itc->itc_scratch) & 0x0000ffff;
6037 *itc->itc_scratch = 0xffffffff;
6038
6039 if ((reg & OHCI_CTXCTL_ACTIVE) == 0 && scratchval != 0xffff) {
6040 /* DMA engine has been stopped */
6041 printf("DMA engine stopped\n");
6042 printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x "
6043 "ContextCtrl 0x%08x%s%s%s%s\n",
6044 sc->sc_sc1394.sc1394_dev.dv_xname,
6045 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr),
6046 reg,
6047 reg & OHCI_CTXCTL_RUN ? " run" : "",
6048 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
6049 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
6050 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
6051 printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n",
6052 sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch,
6053 itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num,
6054 itc->itc_buf_cnt);
6055 {
6056 u_int32_t reg
6057 = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
6058 printf("\t\tIsoCounter 0x%08x, %d %d %d\n", reg,
6059 (reg >> 25) & 0xfe, (reg >> 12) & 0x1fff, reg & 0xfff);
6060 }
6061 printf("\t\tbranch of lastdesc 0x%08x\n",
6062 itc->itc_buf_start->itd_lastdesc->fd_branch);
6063
6064 scratchval = 0xffff;
6065 itc->itc_flags &= ~ITC_FLAGS_RUN;
6066 }
6067
6068 /* unlink old buffers */
6069 if (scratchval != 0xffff) {
6070 /* normal path */
6071 newstartbuf = &itc->itc_buf[scratchval];
6072 } else {
6073 /* DMA engine stopped */
6074 newstartbuf = itc->itc_buf_linkend;
6075 INC_BUF(itc, newstartbuf);
6076 }
6077
6078 itd = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
6079 itc->itc_buf_start = newstartbuf;
6080 while (itd != newstartbuf) {
6081 itc->itc_outpkt += itd->itd_npacket;
6082 fwohci_itd_unlink(itd);
6083 INC_BUF(itc, itd);
6084 --itc->itc_buf_cnt;
6085 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
6086 }
6087
6088 #ifdef DEBUG_USERADD
6089 if (scratchval != 0xffff) {
6090 printf("fwohci0: intr start %d dataend %d %d\n", scratchval,
6091 itc->itc_buf_end->itd_num, itc->itc_outpkt);
6092 }
6093 #endif
6094
6095 if (scratchval == 0xffff) {
6096 /* no data supplied */
6097 printf("fwohci_it_intr: no it data. output total %d\n",
6098 itc->itc_outpkt);
6099
6100 if (itc->itc_buf_cnt > 0) {
6101 printf("fwohci_it_intr: it DMA stops "
6102 "w/ valid databuf %d buf %d data %d"
6103 " intr reg 0x%08x\n",
6104 itc->itc_buf_cnt,
6105 itc->itc_buf_end->itd_num,
6106 fwohci_itd_hasdata(itc->itc_buf_end),
6107 OHCI_CSR_READ(sc, OHCI_REG_IntEventSet));
6108 } else {
6109 /* All the data gone */
6110 itc->itc_buf_start
6111 = itc->itc_buf_end
6112 = itc->itc_buf_linkend
6113 = &itc->itc_buf[0];
6114 printf("fwohci_it_intr: all packets gone\n");
6115 }
6116
6117 itc->itc_flags &= ~ITC_FLAGS_RUN;
6118
6119 OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
6120 OHCI_SUBREG_ContextControlClear, 0xffffffff);
6121 OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
6122 OHCI_SUBREG_CommandPtr, 0);
6123 OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
6124 OHCI_SUBREG_ContextControlClear, 0x1f);
6125
6126 /* send message */
6127 if (itc->itc_waitchan != NULL) {
6128 wakeup((void *)itc->itc_waitchan);
6129 }
6130
6131 return;
6132 }
6133
6134 #if 0
6135 /* unlink old buffers */
6136 newstartbuf = &itc->itc_buf[scratchval];
6137
6138 itd = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
6139 itc->itc_buf_start = newstartbuf;
6140 while (itd != newstartbuf) {
6141 itc->itc_outpkt += itd->itd_npacket;
6142 fwohci_itd_unlink(itd);
6143 INC_BUF(itc, itd);
6144 --itc->itc_buf_cnt;
6145 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
6146 }
6147 #endif
6148
6149 /* sanity check */
6150 {
6151 int startidx, endidx, linkendidx;
6152
6153 startidx = itc->itc_buf_start->itd_num;
6154 endidx = itc->itc_buf_end->itd_num;
6155 linkendidx = itc->itc_buf_linkend->itd_num;
6156
6157 if (startidx < endidx) {
6158 if (linkendidx < startidx
6159 || endidx < linkendidx) {
6160 printf("funny, linkend is not between start "
6161 "and end [%d, %d]: %d\n",
6162 startidx, endidx, linkendidx);
6163 }
6164 } else if (startidx > endidx) {
6165 if (linkendidx < startidx
6166 && endidx < linkendidx) {
6167 printf("funny, linkend is not between start "
6168 "and end [%d, %d]: %d\n",
6169 startidx, endidx, linkendidx);
6170 }
6171 } else {
6172 if (linkendidx != startidx) {
6173 printf("funny, linkend is not between start "
6174 "and end [%d, %d]: %d\n",
6175 startidx, endidx, linkendidx);
6176 }
6177
6178 }
6179 }
6180
6181 /* link if some valid DMA buffers exist */
6182 if (itc->itc_buf_cnt > 1
6183 && itc->itc_buf_linkend != itc->itc_buf_end) {
6184 struct fwohci_it_dmabuf *itdprev;
6185 int i;
6186
6187 DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
6188 itc->itc_num,
6189 itc->itc_buf_start->itd_num,
6190 itc->itc_buf_linkend->itd_num,
6191 itc->itc_buf_end->itd_num,
6192 itc->itc_buf_cnt));
6193
6194 itd = itdprev = itc->itc_buf_linkend;
6195 INC_BUF(itc, itd);
6196
6197 #if 0
6198 if (fwohci_itd_isfilled(itd) || itc->itc_buf_cnt == 2) {
6199 while (itdprev != itc->itc_buf_end) {
6200
6201 if (fwohci_itd_link(itdprev, itd)) {
6202 break;
6203 }
6204
6205 itdprev = itd;
6206 INC_BUF(itc, itd);
6207 }
6208 itc->itc_buf_linkend = itdprev;
6209 }
6210 #endif
6211 i = 0;
6212 while (itdprev != itc->itc_buf_end) {
6213 if (!fwohci_itd_isfilled(itd) && itc->itc_buf_cnt > 2) {
6214 break;
6215 }
6216
6217 if (fwohci_itd_link(itdprev, itd)) {
6218 break;
6219 }
6220
6221 itdprev = itd;
6222 INC_BUF(itc, itd);
6223
6224 itc->itc_buf_linkend = itdprev;
6225 ++i;
6226 }
6227
6228 if (i > 0) {
6229 DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
6230 itc->itc_num,
6231 itc->itc_buf_start->itd_num,
6232 itc->itc_buf_linkend->itd_num,
6233 itc->itc_buf_end->itd_num,
6234 itc->itc_buf_cnt));
6235 }
6236 } else {
6237 struct fwohci_it_dmabuf *le;
6238
6239 le = itc->itc_buf_linkend;
6240
6241 printf("CTX %d: start linkend dataend bufs %d, %d, %d, %d no buffer added\n",
6242 itc->itc_num,
6243 itc->itc_buf_start->itd_num,
6244 itc->itc_buf_linkend->itd_num,
6245 itc->itc_buf_end->itd_num,
6246 itc->itc_buf_cnt);
6247 printf("\tlast descriptor %s %04x %08x\n",
6248 le->itd_lastdesc->fd_flags & OHCI_DESC_INTR_ALWAYS ? "intr" : "",
6249 le->itd_lastdesc->fd_flags,
6250 le->itd_lastdesc->fd_branch);
6251 }
6252
6253 /* send message */
6254 if (itc->itc_waitchan != NULL) {
6255 /* */
6256 wakeup((void *)itc->itc_waitchan);
6257 }
6258 }
6259
6260
6261
6262 /*
6263 * int fwohci_itd_construct(struct fwohci_it_ctx *itc,
6264 * struct fwohci_it_dmabuf *itd, int num,
6265 * struct fwohci_desc *desc, bus_addr_t phys,
6266 * int descsize, int maxsize, paddr_t scratch)
6267 *
6268 * This function initializes one isochronous transmit DMA buffer: it
6269 * builds the descriptor chain and allocates the packet data buffer.
6270 */
6271 int
6272 fwohci_itd_construct(struct fwohci_it_ctx *itc, struct fwohci_it_dmabuf *itd,
6273 int num, struct fwohci_desc *desc, bus_addr_t phys, int descsize,
6274 int maxsize, paddr_t scratch)
6275 {
6276 const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6277 struct fwohci_desc *fd;
6278 struct fwohci_desc *descend;
6279 int npkt;
6280 int bufno = 0; /* DMA segment */
6281 bus_size_t bufused = 0; /* offset in a DMA segment */
6282 int roundsize;
6283 int tag = itc->itc_tag;
6284 int ch = itc->itc_channel;
6285
6286 itd->itd_ctx = itc;
6287 itd->itd_num = num;
6288
6289 if (descsize > 1024*3) {
6290 printf("%s: fwohci_itd_construct[%d] descsize %d too big\n",
6291 xname, num, descsize);
6292 return -1;
6293 }
6294
6295 itd->itd_desc = desc;
6296 itd->itd_descsize = descsize;
6297 itd->itd_desc_phys = phys;
6298
6299 itd->itd_lastdesc = desc;
6300 itd->itd_npacket = 0;
6301
6302 printf("%s: fwohci_itd_construct[%d] desc %p descsize %d, maxsize %d\n",
6303 xname, itd->itd_num, itd->itd_desc, itd->itd_descsize, maxsize);
6304
6305 if (descsize < 4) {
6306 /* descriptor array too small; at least 4 entries are required */
6307 return -1;
6308 }
6309
6310 /* count how many packets this buffer can handle */
6311 itd->itd_maxpacket = (descsize - 1)/3;
6312
6313 /* round up to a power of 2, minimum 16 */
6314 for (roundsize = 16; roundsize < maxsize; roundsize <<= 1)
6315 	continue;
6316 itd->itd_maxsize = roundsize;
6317
6318 printf("\t\tdesc%d [%x, %lx]\n", itd->itd_num,
6319 (u_int32_t)phys,
6320 (unsigned long)((u_int32_t)phys
6321 + (itd->itd_maxpacket*3 + 1)*sizeof(struct fwohci_desc)));
6322 printf("%s: fwohci_itd_construct[%d] npkt %d maxsize round up to %d\n",
6323 xname, itd->itd_num, itd->itd_maxpacket, itd->itd_maxsize);
6324
6325 /* obtain DMA buffer */
6326 if (fwohci_itd_dmabuf_alloc(itd)) {
6327 /* cannot allocate memory for DMA buffer */
6328 return -1;
6329 }
6330
6331 /*
6332 * make descriptor chain
6333 *
6334 * The first descriptor group has STORE_VALUE, OUTPUT_IMMEDIATE
6335 * and OUTPUT_LAST descriptors.  Every following group has an
6336 * OUTPUT_IMMEDIATE and an OUTPUT_LAST descriptor; see the
6337 * layout sketch below.
6338 */
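	/*
	 * Resulting layout of one buffer's descriptor chain; each line
	 * is one 16-byte fwohci_desc slot (n = packet number):
	 *
	 *	[0]     STORE_VALUE       writes buffer number to scratch
	 *	[1+3n]  OUTPUT_IMMEDIATE  iso packet header command
	 *	[2+3n]  (immediate data)  header quadlets; data_length is
	 *	                          patched by fwohci_itd_writedata()
	 *	[3+3n]  OUTPUT_LAST       payload pointer, branch to the
	 *	                          next group (Z = 3)
	 */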
6339 descend = desc + descsize;
6340
6341 /* set store value descriptor for 1st descriptor group */
6342 desc->fd_flags = OHCI_DESC_STORE_VALUE;
6343 desc->fd_reqcount = num;	/* value to store: this DMA buffer's number */
6344 desc->fd_data = scratch;	/* target: physical address of the scratch word */
6345 desc->fd_branch = 0;
6346 desc->fd_status = desc->fd_rescount = 0;
6347
6348 itd->itd_store = desc;
6349 itd->itd_store_phys = phys;
6350
6351 ++desc;
6352 phys += 16;
6353
6354 npkt = 0;
6355 /* make OUTPUT_DESC chain for packets */
6356 for (fd = desc; fd + 2 < descend; fd += 3, ++npkt) {
6357 struct fwohci_desc *fi = fd;
6358 struct fwohci_desc *fl = fd + 2;
6359 u_int32_t *fi_data = (u_int32_t *)(fd + 1);
6360
6361 #if 0
6362 if (npkt > itd->itd_maxpacket - 3) {
6363 printf("%s: %3d fi fl %p %p\n", xname, npkt, fi,fl);
6364 }
6365 #endif
6366
6367 fi->fd_reqcount = 8; /* data size for OHCI command */
6368 fi->fd_flags = OHCI_DESC_IMMED;
6369 fi->fd_data = 0;
6370 fi->fd_branch = 0; /* branch for error */
6371 fi->fd_status = fi->fd_rescount = 0;
6372
6373 /* the channel and tag never change, so set them here */
6374 *fi_data = OHCI_ITHEADER_VAL(TAG, tag) |
6375 OHCI_ITHEADER_VAL(CHAN, ch) |
6376 OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
6377 *++fi_data = 0;
6378 *++fi_data = 0;
6379 *++fi_data = 0;
6380
6381 fl->fd_flags = OHCI_DESC_OUTPUT | OHCI_DESC_LAST |
6382 OHCI_DESC_BRANCH;
6383 fl->fd_branch =
6384 (phys + sizeof(struct fwohci_desc)*(npkt + 1)*3) | 0x03;
6385 fl->fd_status = fl->fd_rescount = 0;
6386
6387 #ifdef FW_DEBUG
6388 if (npkt > itd->itd_maxpacket - 3) {
6389 DPRINTF(("%s: %3d fi fl fl branch %p %p 0x%x\n",
6390 xname, npkt, fi, fl, (int)fl->fd_branch));
6391 }
6392 #endif
6393
6394 /* physical address of this packet's data slot */
6395 fl->fd_data =
6396 (u_int32_t)((itd->itd_seg[bufno].ds_addr + bufused));
6397 bufused += itd->itd_maxsize;
6398 if (bufused > itd->itd_seg[bufno].ds_len) {
6399 bufused = 0;
6400 if (++bufno == itd->itd_nsegs) {
6401 /* fail */
6402 break;
6403 }
6404 }
6405 }
6406
6407 #if 0
6408 if (itd->itd_num == 0) {
6409 u_int32_t *fdp;
6410 u_int32_t adr;
6411 int i = 0;
6412
6413 fdp = (u_int32_t *)itd->itd_desc;
6414 adr = (u_int32_t)itd->itd_desc_phys; /* XXX */
6415
6416 printf("fwohci_itd_construct: audit DMA desc chain. %d\n",
6417 itd->itd_maxpacket);
6418 for (i = 0; i < itd->itd_maxpacket*12 + 4; ++i) {
6419 if (i % 4 == 0) {
6420 printf("\n%x:", adr + 4*i);
6421 }
6422 printf(" %08x", fdp[i]);
6423 }
6424 printf("\n");
6425
6426 }
6427 #endif
6428 /* last branch should be 0 */
6429 --fd;
6430 fd->fd_branch = 0;
6431
6432 printf("%s: pkt %d %d maxdesc %p\n",
6433 xname, npkt, itd->itd_maxpacket, descend);
6434
6435 return 0;
6436 }
6437
6438 void
6439 fwohci_itd_destruct(struct fwohci_it_dmabuf *itd)
6440 {
6441 const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6442
6443 printf("%s: fwohci_itd_destruct %d\n", xname, itd->itd_num);
6444
6445 fwohci_itd_dmabuf_free(itd);
6446 }
6447
6448
6449 /*
6450 * static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
6451 *
6452 * This function allocates DMA memory for a fwohci_it_dmabuf.  It
6453 * returns 0 when it succeeds and a non-zero error code when it
6454 * fails.
6455 */
6456 static int
6457 fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
6458 {
6459 const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6460 bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;
6461
6462 int dmasize = itd->itd_maxsize * itd->itd_maxpacket;
6463 int error;
6464
6465 DPRINTF(("%s: fwohci_itd_dmabuf_alloc[%d] dmasize %d maxpkt %d\n",
6466 xname, itd->itd_num, dmasize, itd->itd_maxpacket));
6467
6468 if ((error = bus_dmamem_alloc(dmat, dmasize, PAGE_SIZE, 0,
6469 itd->itd_seg, FWOHCI_MAX_ITDATASEG, &itd->itd_nsegs, 0)) != 0) {
6470 printf("%s: unable to allocate data buffer, error = %d\n",
6471 xname, error);
6472 goto fail_0;
6473 }
6474
6475 /* checking memory range */
6476 #ifdef FW_DEBUG
6477 {
6478 int loop;
6479
6480 for (loop = 0; loop < itd->itd_nsegs; ++loop) {
6481 DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
6482 (long)itd->itd_seg[loop].ds_addr,
6483 (long)itd->itd_seg[loop].ds_addr
6484 + (long)itd->itd_seg[loop].ds_len - 1));
6485 }
6486 }
6487 #endif
6488
6489 if ((error = bus_dmamem_map(dmat, itd->itd_seg, itd->itd_nsegs,
6490 dmasize, (caddr_t *)&itd->itd_buf,
6491 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
6492 printf("%s: unable to map data buffer, error = %d\n",
6493 xname, error);
6494 goto fail_1;
6495 }
6496
6497 DPRINTF(("fwohci_itd_dmabuf_alloc[%d]: bus_dmamem_map addr %p\n",
6498 itd->itd_num, itd->itd_buf));
6499
6500 if ((error = bus_dmamap_create(dmat, /*chunklen*/dmasize,
6501 itd->itd_nsegs, dmasize, 0, BUS_DMA_WAITOK,
6502 &itd->itd_dmamap)) != 0) {
6503 printf("%s: unable to create data buffer DMA map, "
6504 "error = %d\n", xname, error);
6505 goto fail_2;
6506 }
6507
6508 DPRINTF(("fwohci_itd_dmabuf_alloc: bus_dmamap_create\n"));
6509
6510 if ((error = bus_dmamap_load(dmat, itd->itd_dmamap,
6511 itd->itd_buf, dmasize, NULL, BUS_DMA_WAITOK)) != 0) {
6512 printf("%s: unable to load data buffer DMA map, error = %d\n",
6513 xname, error);
6514 goto fail_3;
6515 }
6516
6517 DPRINTF(("fwohci_itd_dmabuf_alloc: load DMA memory vm %p\n",
6518 itd->itd_buf));
6519 DPRINTF(("\tmapsize %ld nsegs %d\n",
6520 (long)itd->itd_dmamap->dm_mapsize, itd->itd_dmamap->dm_nsegs));
6521
6522 #ifdef FW_DEBUG
6523 {
6524 int loop;
6525
6526 for (loop = 0; loop < itd->itd_dmamap->dm_nsegs; ++loop) {
6527 DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
6528 (long)itd->itd_dmamap->dm_segs[loop].ds_addr,
6529 (long)itd->itd_dmamap->dm_segs[loop].ds_addr +
6530 (long)itd->itd_dmamap->dm_segs[loop].ds_len - 1));
6531 }
6532 }
6533 #endif
6534
6535 return 0;
6536
6537 fail_3:
6538 bus_dmamap_destroy(dmat, itd->itd_dmamap);
6539 fail_2:
6540 bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
6541 fail_1:
6542 bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);
6543 fail_0:
6544 itd->itd_nsegs = 0;
6545 itd->itd_maxpacket = 0;
6546 return error;
6547 }
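
/*
 * Layout note: the data buffer allocated above is carved into
 * itd_maxpacket fixed slots of itd_maxsize bytes each (itd_maxsize was
 * rounded up to a power of two in fwohci_itd_construct()); every
 * OUTPUT_LAST descriptor points at one slot, and fwohci_itd_writedata()
 * copies packet payloads into consecutive slots.
 */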
6548
6549 /*
6550 * static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
6551 *
6552 * This function releases the memory resources allocated by
6553 * fwohci_itd_dmabuf_alloc().
6554 */
6555 static void
6556 fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
6557 {
6558 bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;
6559 int dmasize = itd->itd_maxsize * itd->itd_maxpacket;
6560
6561 bus_dmamap_destroy(dmat, itd->itd_dmamap);
6562 bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
6563 bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);
6564
6565 itd->itd_nsegs = 0;
6566 itd->itd_maxpacket = 0;
6567 }
6568
6569
6570
6571 /*
6572 * int fwohci_itd_link(struct fwohci_it_dmabuf *itd,
6573 * struct fwohci_it_dmabuf *itdc)
6574 *
6575 * This function concatenates the two descriptor chains in the
6576 * dmabufs itd and itdc; the descriptor chain of itdc follows the
6577 * one in itd.  It also moves the interrupt flag from the last
6578 * packet of itd to the first packet of itdc.
6579 *
6580 * This function returns 0 when it succeeds.  If an error happens,
6581 * it returns a negative value.
6582 */
6583 int
6584 fwohci_itd_link(struct fwohci_it_dmabuf *itd, struct fwohci_it_dmabuf *itdc)
6585 {
6586 struct fwohci_desc *fd1, *fdc;
6587
6588 if (itdc->itd_lastdesc == itdc->itd_desc) {
6589 /* no valid data */
6590 printf("fwohci_itd_link: no data\n");
6591 return -1;
6592 }
6593
6594 if (itdc->itd_flags & ITD_FLAGS_LOCK) {
6595 /* used already */
6596 printf("fwohci_itd_link: link locked\n");
6597 return -1;
6598 }
6599 itdc->itd_flags |= ITD_FLAGS_LOCK;
6600 /* lock the preceding buffer (the first of the pair) as well */
6601 itd->itd_flags |= ITD_FLAGS_LOCK;
6602
6603 DPRINTF(("linking %d after %d: add %d pkts\n",
6604 itdc->itd_num, itd->itd_num, itdc->itd_npacket));
6605
6606 /* XXX: should sync cache */
6607
6608 fd1 = itd->itd_lastdesc;
6609 fdc = itdc->itd_desc + 3; /* OUTPUT_LAST in the first descriptor */
6610
6611 /* sanity check */
6612 #define OUTPUT_LAST_DESC (OHCI_DESC_OUTPUT | OHCI_DESC_LAST | OHCI_DESC_BRANCH)
6613 if ((fd1->fd_flags & OUTPUT_LAST_DESC) != OUTPUT_LAST_DESC) {
6614 printf("funny! not OUTPUT_LAST descriptor %p\n", fd1);
6615 }
6616 if (itd->itd_lastdesc - itd->itd_desc != 3 * itd->itd_npacket) {
6617 printf("funny! packet number inconsistency %ld <=> %ld\n",
6618 (long)(itd->itd_lastdesc - itd->itd_desc),
6619 (long)(3*itd->itd_npacket));
6620 }
6621
6622 fd1->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6623 fdc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
6624 fd1->fd_branch = itdc->itd_desc_phys | 4;
6625
6626 itdc->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
6627 /* save the branch address of lastdesc and substitute 0 for it */
6628 itdc->itd_savedbranch = itdc->itd_lastdesc->fd_branch;
6629 itdc->itd_lastdesc->fd_branch = 0;
6630
6631 DPRINTF(("%s: link (%d %d), add pkt %d/%d branch 0x%x next saved 0x%x\n",
6632 itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
6633 itd->itd_num, itdc->itd_num,
6634 itdc->itd_npacket, itdc->itd_maxpacket,
6635 (int)fd1->fd_branch, (int)itdc->itd_savedbranch));
6636
6637 /* XXX: should sync cache */
6638
6639 return 0;
6640 }
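
/*
 * Note on the link/unlink pair: fwohci_itd_link() points the last
 * OUTPUT_LAST descriptor of the preceding buffer at the new buffer's
 * STORE_VALUE descriptor (Z = 4) and parks the new buffer's own tail
 * branch in itd_savedbranch; fwohci_itd_unlink() restores that branch
 * once the buffer has been transmitted, so the buffer can be reused.
 */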
6641
6642
6643 /*
6644 * int fwohci_itd_unlink(struct fwohci_it_dmabuf *itd)
6645 *
6646 * This function unlinks the descriptor chain of the given dmabuf
6647 * from the valid chain of descriptors and makes the buffer
6648 * reusable.  The target buffer is specified by the argument.
6649 */
6650 int
6651 fwohci_itd_unlink(struct fwohci_it_dmabuf *itd)
6652 {
6653 struct fwohci_desc *fd;
6654
6655 /* XXX: should sync cache */
6656
6657 fd = itd->itd_lastdesc;
6658
6659 fd->fd_branch = itd->itd_savedbranch;
6660 DPRINTF(("%s: unlink buf %d branch restored 0x%x\n",
6661 itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
6662 itd->itd_num, (int)fd->fd_branch));
6663
6664 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6665 itd->itd_lastdesc = itd->itd_desc;
6666
6667 fd = itd->itd_desc + 3; /* 1st OUTPUT_LAST */
6668 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6669
6670 /* XXX: should sync cache */
6671
6672 itd->itd_npacket = 0;
6673 itd->itd_lastdesc = itd->itd_desc;
6674 itd->itd_flags &= ~ITD_FLAGS_LOCK;
6675
6676 return 0;
6677 }
6678
6679
6680 /*
6681 * static int fwohci_itd_writedata(struct fwohci_it_dmabuf *, int ndata,
6682 * struct ieee1394_it_datalist *);
6683 *
6684 * This function returns the number of packets written, or a
6685 * negative value if an error happens.
6686 */
6687 int
6688 fwohci_itd_writedata(struct fwohci_it_dmabuf *itd, int ndata,
6689 struct ieee1394_it_datalist *itdata)
6690 {
6691 int writepkt;
6692 int i;
6693 u_int8_t *p;
6694 struct fwohci_desc *fd;
6695 u_int32_t *fd_idata;
6696 const int dspace =
6697 itd->itd_maxpacket - itd->itd_npacket < ndata ?
6698 itd->itd_maxpacket - itd->itd_npacket : ndata;
6699
6700 if (itd->itd_flags & ITD_FLAGS_LOCK || dspace == 0) {
6701 /* locked, or no free packet slots: cannot write anything */
6702 if (itd->itd_flags & ITD_FLAGS_LOCK) {
6703 DPRINTF(("fwohci_itd_writedata: buf %d lock flag %s,"
6704 " dspace %d\n",
6705 itd->itd_num,
6706 itd->itd_flags & ITD_FLAGS_LOCK ? "ON" : "OFF",
6707 dspace));
6708 return 0; /* not an error */
6709 }
6710 }
6711
6712 /* sanity check */
6713 if (itd->itd_maxpacket < itd->itd_npacket) {
6714 printf("fwohci_itd_writedata: funny! # pkt > maxpkt "
6715 "%d %d\n", itd->itd_npacket, itd->itd_maxpacket);
6716 }
6717
6718 p = itd->itd_buf + itd->itd_maxsize * itd->itd_npacket;
6719 fd = itd->itd_lastdesc;
6720
6721 DPRINTF(("fwohci_itd_writedata(%d[%p], %d, 0x%p) invoked:\n",
6722 itd->itd_num, itd, ndata, itdata));
6723
6724 for (writepkt = 0; writepkt < dspace; ++writepkt) {
6725 u_int8_t *p1 = p;
6726 int cpysize;
6727 int totalsize = 0;
6728
6729 DPRINTF(("writing %d ", writepkt));
6730
6731 for (i = 0; i < 4; ++i) {
6732 switch (itdata->it_cmd[i]&IEEE1394_IT_CMD_MASK) {
6733 case IEEE1394_IT_CMD_IMMED:
6734 memcpy(p1, &itdata->it_u[i].id_data, 8);
6735 p1 += 8;
6736 totalsize += 8;
6737 break;
6738 case IEEE1394_IT_CMD_PTR:
6739 cpysize = itdata->it_cmd[i]&IEEE1394_IT_CMD_SIZE;
6740 DPRINTF(("fwohci_itd_writedata: cpy %d %p\n",
6741 cpysize, itdata->it_u[i].id_addr));
6742 if (totalsize + cpysize > itd->itd_maxsize) {
6743 /* error: too big size */
6744 break;
6745 }
6746 memcpy(p1, itdata->it_u[i].id_addr, cpysize);
6747 totalsize += cpysize;
6748 break;
6749 case IEEE1394_IT_CMD_NOP:
6750 break;
6751 default:
6752 /* unknown command */
6753 break;
6754 }
6755 }
6756
6757 /* only for the DV test; DV packets are always 488 bytes */
6758 if (totalsize != 488) {
6759 printf("error: totalsize %d at %d\n",
6760 totalsize, writepkt);
6761 }
6762
6763 DPRINTF(("totalsize %d ", totalsize));
6764
6765 /* fill iso command in OUTPUT_IMMED descriptor */
6766
6767 /* XXX: sync cache */
6768 fd += 2;	/* step to this packet's immediate-data quadlets */
6769 fd_idata = (u_int32_t *)fd;
6770
6771 /*
6772 * The tag, channel and tcode were already written in
6773 * itd_construct(); only the data_length field is updated here.
6774 */
6775 #if 0
6776 *fd_idata = OHCI_ITHEADER_VAL(TAG, tag) |
6777 OHCI_ITHEADER_VAL(CHAN, ch) |
6778 OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
6779 #endif
6780 *++fd_idata = totalsize << 16;
6781
6782 /* fill data in OUTPUT_LAST descriptor */
6783 ++fd;
6784 /* intr check... */
6785 if (fd->fd_flags & OHCI_DESC_INTR_ALWAYS) {
6786 printf("uncleared INTR flag in desc %ld\n",
6787 (long)(fd - itd->itd_desc - 1)/3);
6788 }
6789 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6790
6791 if ((fd - itd->itd_desc - 1)/3 != itd->itd_maxpacket - 1) {
6792 u_int32_t bcal;
6793
6794 bcal = (fd - itd->itd_desc + 1)*sizeof(struct fwohci_desc) + (u_int32_t)itd->itd_desc_phys;
6795 if (bcal != (fd->fd_branch & 0xfffffff0)) {
6796
6797 printf("uum, branch differ at %d, %x %x %ld/%d\n",
6798 itd->itd_num,
6799 bcal,
6800 fd->fd_branch,
6801 (long)((fd - itd->itd_desc - 1)/3),
6802 itd->itd_maxpacket);
6803 }
6804 } else {
6805 /* the last packet */
6806 if (fd->fd_branch != 0) {
6807 printf("uum, branch differ at %d, %x %x %ld/%d\n",
6808 itd->itd_num,
6809 0,
6810 fd->fd_branch,
6811 (long)((fd - itd->itd_desc - 1)/3),
6812 itd->itd_maxpacket);
6813 }
6814 }
6815
6816 /* sanity check */
6817 if (fd->fd_flags != OUTPUT_LAST_DESC) {
6818 printf("fwohci_itd_writedata: dmabuf %d desc inconsistent %d\n",
6819 itd->itd_num, writepkt + itd->itd_npacket);
6820 break;
6821 }
6822 fd->fd_reqcount = totalsize;
6823 /* XXX: sync cache */
6824
6825 ++itdata;
6826 p += itd->itd_maxsize;
6827 }
6828
6829 DPRINTF(("loop start %d, %d times %d\n",
6830 itd->itd_npacket, dspace, writepkt));
6831
6832 itd->itd_npacket += writepkt;
6833 itd->itd_lastdesc = fd;
6834
6835 return writepkt;
6836 }
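
/*
 * Note: for each packet written above, the prebuilt descriptor group
 * is patched in place: the second header quadlet of the
 * OUTPUT_IMMEDIATE part receives the data_length (totalsize << 16),
 * the OUTPUT_LAST reqcount is set to the same total, and any stale
 * OHCI_DESC_INTR_ALWAYS flag is cleared.
 */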
6837
6838
6839
6840
6841
6842 int
6843 fwohci_itd_isfilled(struct fwohci_it_dmabuf *itd)
6844 {
6845
6846 return itd->itd_npacket*2 > itd->itd_maxpacket ? 1 : 0;
6847 }
6848