fwohci.c revision 1.84 1 /* $NetBSD: fwohci.c,v 1.84 2005/02/27 00:27:17 perry Exp $ */
2
3 /*-
4 * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas of 3am Software Foundry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * IEEE1394 Open Host Controller Interface
41 * based on OHCI Specification 1.1 (January 6, 2000)
42 * The first version to support network interface part is wrtten by
43 * Atsushi Onoe <onoe (at) NetBSD.org>.
44 */
45
46 /*
47 * The first version to support isochronous acquisition part is wrtten
48 * by HAYAKAWA Koichi <haya (at) NetBSD.org>.
49 */
50
51 #include <sys/cdefs.h>
52 __KERNEL_RCSID(0, "$NetBSD: fwohci.c,v 1.84 2005/02/27 00:27:17 perry Exp $");
53
54 #define FWOHCI_WAIT_DEBUG 1
55
56 #define FWOHCI_IT_BUFNUM 4
57
58 #include "opt_inet.h"
59 #include "fwiso.h"
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/kthread.h>
64 #include <sys/socket.h>
65 #include <sys/callout.h>
66 #include <sys/device.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
69 #include <sys/mbuf.h>
70 #include <sys/poll.h>
71 #include <sys/select.h>
72
73 #if __NetBSD_Version__ >= 105010000
74 #include <uvm/uvm_extern.h>
75 #else
76 #include <vm/vm.h>
77 #endif
78
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81
82 #include <dev/ieee1394/ieee1394reg.h>
83 #include <dev/ieee1394/fwohcireg.h>
84
85 #include <dev/ieee1394/ieee1394var.h>
86 #include <dev/ieee1394/fwohcivar.h>
87 #include <dev/ieee1394/fwisovar.h>
88
89 static const char * const ieee1394_speeds[] = { IEEE1394_SPD_STRINGS };
90
91 #if 0
92 static int fwohci_dnamem_alloc(struct fwohci_softc *sc, int size,
93 int alignment, bus_dmamap_t *mapp, caddr_t *kvap, int flags);
94 #endif
95 static void fwohci_create_event_thread(void *);
96 static void fwohci_thread_init(void *);
97
98 static void fwohci_event_thread(struct fwohci_softc *);
99 static void fwohci_hw_init(struct fwohci_softc *);
100 static void fwohci_power(int, void *);
101 static void fwohci_shutdown(void *);
102
103 static int fwohci_desc_alloc(struct fwohci_softc *);
104 static struct fwohci_desc *fwohci_desc_get(struct fwohci_softc *, int);
105 static void fwohci_desc_put(struct fwohci_softc *, struct fwohci_desc *, int);
106
107 static int fwohci_ctx_alloc(struct fwohci_softc *, struct fwohci_ctx **,
108 int, int, int);
109 static void fwohci_ctx_free(struct fwohci_softc *, struct fwohci_ctx *);
110 static void fwohci_ctx_init(struct fwohci_softc *, struct fwohci_ctx *);
111
112 static int fwohci_misc_dmabuf_alloc(bus_dma_tag_t, int, int,
113 bus_dma_segment_t *, bus_dmamap_t *, void **, const char *);
114 static void fwohci_misc_dmabuf_free(bus_dma_tag_t, int, int,
115 bus_dma_segment_t *, bus_dmamap_t *, caddr_t);
116
117 static struct fwohci_ir_ctx *fwohci_ir_ctx_construct(struct fwohci_softc *,
118 int, int, int, int, int, int);
119 static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *);
120
121 static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *);
122 static int fwohci_ir_init(struct fwohci_ir_ctx *);
123 static int fwohci_ir_start(struct fwohci_ir_ctx *);
124 static void fwohci_ir_intr(struct fwohci_softc *, struct fwohci_ir_ctx *);
125 static int fwohci_ir_stop(struct fwohci_ir_ctx *);
126 static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *);
127 #ifdef USEDRAIN
128 static int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *);
129 #endif /* USEDRAIN */
130
131 static int fwohci_it_desc_alloc(struct fwohci_it_ctx *);
132 static void fwohci_it_desc_free(struct fwohci_it_ctx *itc);
133 struct fwohci_it_ctx *fwohci_it_ctx_construct(struct fwohci_softc *,
134 int, int, int, int);
135 void fwohci_it_ctx_destruct(struct fwohci_it_ctx *);
136 int fwohci_it_ctx_writedata(ieee1394_it_tag_t, int,
137 struct ieee1394_it_datalist *, int);
138 static void fwohci_it_ctx_run(struct fwohci_it_ctx *);
139 int fwohci_it_ctx_flush(ieee1394_it_tag_t);
140 static void fwohci_it_intr(struct fwohci_softc *, struct fwohci_it_ctx *);
141
142 int fwohci_itd_construct(struct fwohci_it_ctx *, struct fwohci_it_dmabuf *,
143 int, struct fwohci_desc *, bus_addr_t, int, int, paddr_t);
144 void fwohci_itd_destruct(struct fwohci_it_dmabuf *);
145 static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *);
146 static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *);
147 int fwohci_itd_link(struct fwohci_it_dmabuf *, struct fwohci_it_dmabuf *);
148 int fwohci_itd_unlink(struct fwohci_it_dmabuf *);
149 int fwohci_itd_writedata(struct fwohci_it_dmabuf *, int,
150 struct ieee1394_it_datalist *);
151 int fwohci_itd_isfilled(struct fwohci_it_dmabuf *);
152
153 static int fwohci_buf_alloc(struct fwohci_softc *, struct fwohci_buf *);
154 static void fwohci_buf_free(struct fwohci_softc *, struct fwohci_buf *);
155 static void fwohci_buf_init_rx(struct fwohci_softc *);
156 static void fwohci_buf_start_rx(struct fwohci_softc *);
157 static void fwohci_buf_stop_tx(struct fwohci_softc *);
158 static void fwohci_buf_stop_rx(struct fwohci_softc *);
159 static void fwohci_buf_next(struct fwohci_softc *, struct fwohci_ctx *);
160 static int fwohci_buf_pktget(struct fwohci_softc *, struct fwohci_buf **,
161 caddr_t *, int);
162 static int fwohci_buf_input(struct fwohci_softc *, struct fwohci_ctx *,
163 struct fwohci_pkt *);
164 static int fwohci_buf_input_ppb(struct fwohci_softc *, struct fwohci_ctx *,
165 struct fwohci_pkt *);
166
167 static u_int8_t fwohci_phy_read(struct fwohci_softc *, u_int8_t);
168 static void fwohci_phy_write(struct fwohci_softc *, u_int8_t, u_int8_t);
169 static void fwohci_phy_busreset(struct fwohci_softc *);
170 static void fwohci_phy_input(struct fwohci_softc *, struct fwohci_pkt *);
171
172 static int fwohci_handler_set(struct fwohci_softc *, int, u_int32_t, u_int32_t,
173 u_int32_t, int (*)(struct fwohci_softc *, void *, struct fwohci_pkt *),
174 void *);
175
176 ieee1394_ir_tag_t fwohci_ir_ctx_set(struct device *, int, int, int, int, int);
177 int fwohci_ir_ctx_clear(struct device *, ieee1394_ir_tag_t);
178 int fwohci_ir_read(struct device *, ieee1394_ir_tag_t, struct uio *,
179 int, int);
180 int fwohci_ir_wait(struct device *, ieee1394_ir_tag_t, void *, char *name);
181 int fwohci_ir_select(struct device *, ieee1394_ir_tag_t, struct proc *);
182
183
184
185 ieee1394_it_tag_t fwohci_it_set(struct ieee1394_softc *, int, int);
186 static ieee1394_it_tag_t fwohci_it_ctx_set(struct fwohci_softc *, int, int, int);
187 int fwohci_it_ctx_clear(ieee1394_it_tag_t *);
188
189 static void fwohci_arrq_input(struct fwohci_softc *, struct fwohci_ctx *);
190 static void fwohci_arrs_input(struct fwohci_softc *, struct fwohci_ctx *);
191 static void fwohci_as_input(struct fwohci_softc *, struct fwohci_ctx *);
192
193 static int fwohci_at_output(struct fwohci_softc *, struct fwohci_ctx *,
194 struct fwohci_pkt *);
195 static void fwohci_at_done(struct fwohci_softc *, struct fwohci_ctx *, int);
196 static void fwohci_atrs_output(struct fwohci_softc *, int, struct fwohci_pkt *,
197 struct fwohci_pkt *);
198
199 static int fwohci_guidrom_init(struct fwohci_softc *);
200 static void fwohci_configrom_init(struct fwohci_softc *);
201 static int fwohci_configrom_input(struct fwohci_softc *, void *,
202 struct fwohci_pkt *);
203 static void fwohci_selfid_init(struct fwohci_softc *);
204 static int fwohci_selfid_input(struct fwohci_softc *);
205
206 static void fwohci_csr_init(struct fwohci_softc *);
207 static int fwohci_csr_input(struct fwohci_softc *, void *,
208 struct fwohci_pkt *);
209
210 static void fwohci_uid_collect(struct fwohci_softc *);
211 static void fwohci_uid_req(struct fwohci_softc *, int);
212 static int fwohci_uid_input(struct fwohci_softc *, void *,
213 struct fwohci_pkt *);
214 static int fwohci_uid_lookup(struct fwohci_softc *, const u_int8_t *);
215 static void fwohci_check_nodes(struct fwohci_softc *);
216
217 static int fwohci_if_inreg(struct device *, u_int32_t, u_int32_t,
218 void (*)(struct device *, struct mbuf *));
219 static int fwohci_if_input(struct fwohci_softc *, void *, struct fwohci_pkt *);
220 static int fwohci_if_input_iso(struct fwohci_softc *, void *, struct fwohci_pkt *);
221
222 static int fwohci_if_output(struct device *, struct mbuf *,
223 void (*)(struct device *, struct mbuf *));
224 static int fwohci_if_setiso(struct device *, u_int32_t, u_int32_t, u_int32_t,
225 void (*)(struct device *, struct mbuf *));
226 static int fwohci_read(struct ieee1394_abuf *);
227 static int fwohci_write(struct ieee1394_abuf *);
228 static int fwohci_read_resp(struct fwohci_softc *, void *, struct fwohci_pkt *);
229 static int fwohci_write_ack(struct fwohci_softc *, void *, struct fwohci_pkt *);
230 static int fwohci_read_multi_resp(struct fwohci_softc *, void *,
231 struct fwohci_pkt *);
232 static int fwohci_inreg(struct ieee1394_abuf *, int);
233 static int fwohci_unreg(struct ieee1394_abuf *, int);
234 static int fwohci_parse_input(struct fwohci_softc *, void *,
235 struct fwohci_pkt *);
236 static int fwohci_submatch(struct device *, struct cfdata *,
237 const locdesc_t *, void *);
238
239 /* XXX */
240 u_int16_t fwohci_cycletimer(struct fwohci_softc *);
241 u_int16_t fwohci_it_cycletimer(ieee1394_it_tag_t);
242
243 #ifdef FW_DEBUG
244 static void fwohci_show_intr(struct fwohci_softc *, u_int32_t);
245 static void fwohci_show_phypkt(struct fwohci_softc *, u_int32_t);
246
247 /* 1 is normal debug, 2 is verbose debug, 3 is complete (packet dumps). */
248
249 #define DPRINTF(x) if (fwdebug) printf x
250 #define DPRINTFN(n,x) if (fwdebug>(n)) printf x
251 int fwdebug = 1;
252 #else
253 #define DPRINTF(x)
254 #define DPRINTFN(n,x)
255 #endif
256
257 #define OHCI_ITHEADER_SPD_MASK 0x00070000
258 #define OHCI_ITHEADER_SPD_BITPOS 16
259 #define OHCI_ITHEADER_TAG_MASK 0x0000c000
260 #define OHCI_ITHEADER_TAG_BITPOS 14
261 #define OHCI_ITHEADER_CHAN_MASK 0x00003f00
262 #define OHCI_ITHEADER_CHAN_BITPOS 8
263 #define OHCI_ITHEADER_TCODE_MASK 0x000000f0
264 #define OHCI_ITHEADER_TCODE_BITPOS 4
265 #define OHCI_ITHEADER_SY_MASK 0x0000000f
266 #define OHCI_ITHEADER_SY_BITPOS 0
267
268 #define OHCI_ITHEADER_VAL(fld, val) \
269 (OHCI_ITHEADER_##fld##_MASK & ((val) << OHCI_ITHEADER_##fld##_BITPOS))
270
271 int
272 fwohci_init(struct fwohci_softc *sc, const struct evcnt *ev)
273 {
274 int i;
275 u_int32_t val;
276 #if 0
277 int error;
278 #endif
279
280 evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ev,
281 sc->sc_sc1394.sc1394_dev.dv_xname, "intr");
282
283 evcnt_attach_dynamic(&sc->sc_isocnt, EVCNT_TYPE_MISC, ev,
284 sc->sc_sc1394.sc1394_dev.dv_xname, "isorcvs");
285 evcnt_attach_dynamic(&sc->sc_ascnt, EVCNT_TYPE_MISC, ev,
286 sc->sc_sc1394.sc1394_dev.dv_xname, "asrcvs");
287 evcnt_attach_dynamic(&sc->sc_itintrcnt, EVCNT_TYPE_INTR, ev,
288 sc->sc_sc1394.sc1394_dev.dv_xname, "itintr");
289
290 /*
291 * Wait for reset completion
292 */
293 for (i = 0; i < OHCI_LOOP; i++) {
294 val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear);
295 if ((val & OHCI_HCControl_SoftReset) == 0)
296 break;
297 DELAY(10);
298 }
299
300 /* What dialect of OHCI is this device?
301 */
302 val = OHCI_CSR_READ(sc, OHCI_REG_Version);
303 aprint_normal("%s: OHCI %u.%u", sc->sc_sc1394.sc1394_dev.dv_xname,
304 OHCI_Version_GET_Version(val), OHCI_Version_GET_Revision(val));
305
306 LIST_INIT(&sc->sc_nodelist);
307
308 if (fwohci_guidrom_init(sc) != 0) {
309 aprint_error("\n%s: fatal: no global UID ROM\n",
310 sc->sc_sc1394.sc1394_dev.dv_xname);
311 return -1;
312 }
313
314 aprint_normal(", %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
315 sc->sc_sc1394.sc1394_guid[0], sc->sc_sc1394.sc1394_guid[1],
316 sc->sc_sc1394.sc1394_guid[2], sc->sc_sc1394.sc1394_guid[3],
317 sc->sc_sc1394.sc1394_guid[4], sc->sc_sc1394.sc1394_guid[5],
318 sc->sc_sc1394.sc1394_guid[6], sc->sc_sc1394.sc1394_guid[7]);
319
320 /* Get the maximum link speed and receive size
321 */
322 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
323 sc->sc_sc1394.sc1394_link_speed =
324 OHCI_BITVAL(val, OHCI_BusOptions_LinkSpd);
325 if (sc->sc_sc1394.sc1394_link_speed < IEEE1394_SPD_MAX) {
326 aprint_normal(", %s",
327 ieee1394_speeds[sc->sc_sc1394.sc1394_link_speed]);
328 } else {
329 aprint_normal(", unknown speed %u",
330 sc->sc_sc1394.sc1394_link_speed);
331 }
332
333 /* MaxRec is encoded as log2(max_rec_octets)-1
334 */
335 sc->sc_sc1394.sc1394_max_receive =
336 1 << (OHCI_BITVAL(val, OHCI_BusOptions_MaxRec) + 1);
337 aprint_normal(", %u max_rec", sc->sc_sc1394.sc1394_max_receive);
338
339 /*
340 * Count how many isochronous receive ctx we have.
341 */
342 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0);
343 val = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntMaskClear);
344 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskClear, ~0);
345 for (i = 0; val != 0; val >>= 1) {
346 if (val & 0x1)
347 i++;
348 }
349 sc->sc_isoctx = i;
350 aprint_normal(", %d ir_ctx", sc->sc_isoctx);
351
352 /*
353 * Count how many isochronous transmit ctx we have.
354 */
355 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
356 val = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntMaskClear);
357 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskClear, ~0);
358 for (i = 0; val != 0; val >>= 1) {
359 if (val & 0x1) {
360 i++;
361 OHCI_SYNC_TX_DMA_WRITE(sc, i,OHCI_SUBREG_CommandPtr,0);
362 }
363 }
364 sc->sc_itctx = i;
365
366 aprint_normal(", %d it_ctx", sc->sc_itctx);
367
368 aprint_normal("\n");
369
370 #if 0
371 error = fwohci_dnamem_alloc(sc, OHCI_CONFIG_SIZE,
372 OHCI_CONFIG_ALIGNMENT, &sc->sc_configrom_map,
373 (caddr_t *) &sc->sc_configrom, BUS_DMA_WAITOK|BUS_DMA_COHERENT);
374 return error;
375 #endif
376
377 sc->sc_dying = 0;
378 sc->sc_nodeid = 0xffff; /* invalid */
379
380 sc->sc_sc1394.sc1394_callback.sc1394_read = fwohci_read;
381 sc->sc_sc1394.sc1394_callback.sc1394_write = fwohci_write;
382 sc->sc_sc1394.sc1394_callback.sc1394_inreg = fwohci_inreg;
383 sc->sc_sc1394.sc1394_callback.sc1394_unreg = fwohci_unreg;
384
385 kthread_create(fwohci_create_event_thread, sc);
386 return 0;
387 }
388
389 static int
390 fwohci_if_setiso(struct device *self, u_int32_t channel, u_int32_t tag,
391 u_int32_t direction, void (*handler)(struct device *, struct mbuf *))
392 {
393 struct fwohci_softc *sc = (struct fwohci_softc *)self;
394 int retval;
395 int s;
396
397 if (direction == 1) {
398 return EIO;
399 }
400
401 s = splnet();
402 retval = fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA,
403 channel, 1 << tag, 0, fwohci_if_input_iso, handler);
404 splx(s);
405
406 if (!retval) {
407 printf("%s: dummy iso handler set\n",
408 sc->sc_sc1394.sc1394_dev.dv_xname);
409 } else {
410 printf("%s: dummy iso handler cannot set\n",
411 sc->sc_sc1394.sc1394_dev.dv_xname);
412 }
413
414 return retval;
415 }
416
417 int
418 fwohci_intr(void *arg)
419 {
420 struct fwohci_softc * const sc = arg;
421 int progress = 0;
422 u_int32_t intmask, iso;
423
424 for (;;) {
425 intmask = OHCI_CSR_READ(sc, OHCI_REG_IntEventClear);
426
427 /*
428 * On a bus reset, everything except bus reset gets
429 * cleared. That can't get cleared until the selfid
430 * phase completes (which happens outside the
431 * interrupt routines). So if just a bus reset is left
432 * in the mask and it's already in the sc_intmask,
433 * just return.
434 */
435
436 if ((intmask == 0) ||
437 (progress && (intmask == OHCI_Int_BusReset) &&
438 (sc->sc_intmask & OHCI_Int_BusReset))) {
439 if (progress)
440 wakeup(fwohci_event_thread);
441 return progress;
442 }
443 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
444 intmask & ~OHCI_Int_BusReset);
445 #ifdef FW_DEBUG
446 if (fwdebug > 1)
447 fwohci_show_intr(sc, intmask);
448 #endif
449
450 if (intmask & OHCI_Int_BusReset) {
451 /*
452 * According to OHCI spec 6.1.1 "busReset",
453 * All asynchronous transmit must be stopped before
454 * clearing BusReset. Moreover, the BusReset
455 * interrupt bit should not be cleared during the
456 * SelfID phase. Thus we turned off interrupt mask
457 * bit of BusReset instead until SelfID completion
458 * or SelfID timeout.
459 */
460 intmask &= OHCI_Int_SelfIDComplete;
461 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear,
462 OHCI_Int_BusReset);
463 sc->sc_intmask = OHCI_Int_BusReset;
464 }
465 sc->sc_intmask |= intmask;
466
467 if (intmask & OHCI_Int_IsochTx) {
468 int i;
469
470 iso = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear);
471 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntEventClear, iso);
472
473 sc->sc_itintrcnt.ev_count++;
474 for (i = 0; i < sc->sc_itctx; ++i) {
475 if ((iso & (1<<i)) == 0 ||
476 sc->sc_ctx_it[i] == NULL) {
477 continue;
478 }
479
480 fwohci_it_intr(sc, sc->sc_ctx_it[i]);
481 }
482 }
483 if (intmask & OHCI_Int_IsochRx) {
484 int i;
485
486 iso = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear);
487 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear, iso);
488
489 for (i = 0; i < sc->sc_isoctx; i++) {
490 if ((iso & (1 << i))
491 && sc->sc_ctx_ir[i] != NULL) {
492 iso &= ~(1 << i);
493 fwohci_ir_intr(sc, sc->sc_ctx_ir[i]);
494 }
495 }
496
497 if (iso == 0) {
498 sc->sc_intmask &= ~OHCI_Int_IsochRx;
499 }
500 sc->sc_iso |= iso;
501 }
502
503 if (!progress) {
504 sc->sc_intrcnt.ev_count++;
505 progress = 1;
506 }
507 }
508 }
509
510 static void
511 fwohci_create_event_thread(void *arg)
512 {
513 struct fwohci_softc *sc = arg;
514
515 if (kthread_create1(fwohci_thread_init, sc, &sc->sc_event_thread, "%s",
516 sc->sc_sc1394.sc1394_dev.dv_xname)) {
517 printf("%s: unable to create event thread\n",
518 sc->sc_sc1394.sc1394_dev.dv_xname);
519 panic("fwohci_create_event_thread");
520 }
521 }
522
523 static void
524 fwohci_thread_init(void *arg)
525 {
526 struct fwohci_softc *sc = arg;
527 int i;
528
529 /*
530 * Allocate descriptors
531 */
532 if (fwohci_desc_alloc(sc)) {
533 printf("%s: not enabling interrupts\n",
534 sc->sc_sc1394.sc1394_dev.dv_xname);
535 kthread_exit(1);
536 }
537
538 /*
539 * Enable Link Power
540 */
541
542 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS);
543
544 /*
545 * Allocate DMA Context
546 */
547 fwohci_ctx_alloc(sc, &sc->sc_ctx_arrq, OHCI_BUF_ARRQ_CNT,
548 OHCI_CTX_ASYNC_RX_REQUEST, FWOHCI_CTX_ASYNC);
549 fwohci_ctx_alloc(sc, &sc->sc_ctx_arrs, OHCI_BUF_ARRS_CNT,
550 OHCI_CTX_ASYNC_RX_RESPONSE, FWOHCI_CTX_ASYNC);
551 fwohci_ctx_alloc(sc, &sc->sc_ctx_atrq, 0, OHCI_CTX_ASYNC_TX_REQUEST,
552 FWOHCI_CTX_ASYNC);
553 fwohci_ctx_alloc(sc, &sc->sc_ctx_atrs, 0, OHCI_CTX_ASYNC_TX_RESPONSE,
554 FWOHCI_CTX_ASYNC);
555 sc->sc_ctx_as = malloc(sizeof(sc->sc_ctx_as[0]) * sc->sc_isoctx,
556 M_DEVBUF, M_WAITOK);
557 if (sc->sc_ctx_as == NULL) {
558 printf("no asynchronous stream\n");
559 } else {
560 for (i = 0; i < sc->sc_isoctx; i++)
561 sc->sc_ctx_as[i] = NULL;
562 }
563 sc->sc_ctx_ir = malloc(sizeof(sc->sc_ctx_ir[0]) * sc->sc_isoctx,
564 M_DEVBUF, M_WAITOK|M_ZERO);
565 sc->sc_ctx_it = malloc(sizeof(sc->sc_ctx_it[0]) * sc->sc_itctx,
566 M_DEVBUF, M_WAITOK|M_ZERO);
567
568 /*
569 * Allocate buffer for configuration ROM and SelfID buffer
570 */
571 fwohci_buf_alloc(sc, &sc->sc_buf_cnfrom);
572 fwohci_buf_alloc(sc, &sc->sc_buf_selfid);
573
574 callout_init(&sc->sc_selfid_callout);
575
576 sc->sc_sc1394.sc1394_ifinreg = fwohci_if_inreg;
577 sc->sc_sc1394.sc1394_ifoutput = fwohci_if_output;
578 sc->sc_sc1394.sc1394_ifsetiso = fwohci_if_setiso;
579
580 sc->sc_sc1394.sc1394_ir_open = fwohci_ir_ctx_set;
581 sc->sc_sc1394.sc1394_ir_close = fwohci_ir_ctx_clear;
582 sc->sc_sc1394.sc1394_ir_read = fwohci_ir_read;
583 sc->sc_sc1394.sc1394_ir_wait = fwohci_ir_wait;
584 sc->sc_sc1394.sc1394_ir_select = fwohci_ir_select;
585
586 #if 0
587 sc->sc_sc1394.sc1394_it_open = fwohci_it_open;
588 sc->sc_sc1394.sc1394_it_write = fwohci_it_write;
589 sc->sc_sc1394.sc1394_it_close = fwohci_it_close;
590 /* XXX: need fwohci_it_flush? */
591 #endif
592
593 /*
594 * establish hooks for shutdown and suspend/resume
595 */
596 sc->sc_shutdownhook = shutdownhook_establish(fwohci_shutdown, sc);
597 sc->sc_powerhook = powerhook_establish(fwohci_power, sc);
598
599 sc->sc_sc1394.sc1394_if = config_found(&sc->sc_sc1394.sc1394_dev, "fw",
600 fwohci_print);
601
602 #if NFWISO > 0
603 fwiso_register_if(&sc->sc_sc1394);
604 #endif
605
606 /* Main loop. It's not coming back normally. */
607
608 fwohci_event_thread(sc);
609
610 kthread_exit(0);
611 }
612
613 static void
614 fwohci_event_thread(struct fwohci_softc *sc)
615 {
616 int i, s;
617 u_int32_t intmask, iso;
618
619 s = splbio();
620
621 /*
622 * Initialize hardware registers.
623 */
624
625 fwohci_hw_init(sc);
626
627 /* Initial Bus Reset */
628 fwohci_phy_busreset(sc);
629 splx(s);
630
631 while (!sc->sc_dying) {
632 s = splbio();
633 intmask = sc->sc_intmask;
634 if (intmask == 0) {
635 tsleep(fwohci_event_thread, PZERO, "fwohciev", 0);
636 splx(s);
637 continue;
638 }
639 sc->sc_intmask = 0;
640 splx(s);
641
642 if (intmask & OHCI_Int_BusReset) {
643 fwohci_buf_stop_tx(sc);
644 if (sc->sc_uidtbl != NULL) {
645 free(sc->sc_uidtbl, M_DEVBUF);
646 sc->sc_uidtbl = NULL;
647 }
648
649 callout_reset(&sc->sc_selfid_callout,
650 OHCI_SELFID_TIMEOUT,
651 (void (*)(void *))fwohci_phy_busreset, sc);
652 sc->sc_nodeid = 0xffff; /* indicate invalid */
653 sc->sc_rootid = 0;
654 sc->sc_irmid = IEEE1394_BCAST_PHY_ID;
655 }
656 if (intmask & OHCI_Int_SelfIDComplete) {
657 s = splbio();
658 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
659 OHCI_Int_BusReset);
660 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet,
661 OHCI_Int_BusReset);
662 splx(s);
663 callout_stop(&sc->sc_selfid_callout);
664 if (fwohci_selfid_input(sc) == 0) {
665 fwohci_buf_start_rx(sc);
666 fwohci_uid_collect(sc);
667 }
668 }
669 if (intmask & OHCI_Int_ReqTxComplete)
670 fwohci_at_done(sc, sc->sc_ctx_atrq, 0);
671 if (intmask & OHCI_Int_RespTxComplete)
672 fwohci_at_done(sc, sc->sc_ctx_atrs, 0);
673 if (intmask & OHCI_Int_RQPkt)
674 fwohci_arrq_input(sc, sc->sc_ctx_arrq);
675 if (intmask & OHCI_Int_RSPkt)
676 fwohci_arrs_input(sc, sc->sc_ctx_arrs);
677 if (intmask & OHCI_Int_IsochRx) {
678 if (sc->sc_ctx_as == NULL) {
679 continue;
680 }
681 s = splbio();
682 iso = sc->sc_iso;
683 sc->sc_iso = 0;
684 splx(s);
685 for (i = 0; i < sc->sc_isoctx; i++) {
686 if ((iso & (1 << i)) &&
687 sc->sc_ctx_as[i] != NULL) {
688 fwohci_as_input(sc, sc->sc_ctx_as[i]);
689 sc->sc_ascnt.ev_count++;
690 }
691 }
692 }
693 }
694 }
695
696 #if 0
697 static int
698 fwohci_dnamem_alloc(struct fwohci_softc *sc, int size, int alignment,
699 bus_dmamap_t *mapp, caddr_t *kvap, int flags)
700 {
701 bus_dma_segment_t segs[1];
702 int error, nsegs, steps;
703
704 steps = 0;
705 error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, alignment,
706 segs, 1, &nsegs, flags);
707 if (error)
708 goto cleanup;
709
710 steps = 1;
711 error = bus_dmamem_map(sc->sc_dmat, segs, nsegs, segs[0].ds_len,
712 kvap, flags);
713 if (error)
714 goto cleanup;
715
716 if (error == 0)
717 error = bus_dmamap_create(sc->sc_dmat, size, 1, alignment,
718 size, flags, mapp);
719 if (error)
720 goto cleanup;
721 if (error == 0)
722 error = bus_dmamap_load(sc->sc_dmat, *mapp, *kvap, size, NULL,
723 flags);
724 if (error)
725 goto cleanup;
726
727 cleanup:
728 switch (steps) {
729 case 1:
730 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
731 }
732
733 return error;
734 }
735 #endif
736
737 int
738 fwohci_print(void *aux, const char *pnp)
739 {
740 char *name = aux;
741
742 if (pnp)
743 aprint_normal("%s at %s", name, pnp);
744
745 return UNCONF;
746 }
747
748 static void
749 fwohci_hw_init(struct fwohci_softc *sc)
750 {
751 int i;
752 u_int32_t val;
753
754 /*
755 * Software Reset.
756 */
757 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset);
758 for (i = 0; i < OHCI_LOOP; i++) {
759 val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear);
760 if ((val & OHCI_HCControl_SoftReset) == 0)
761 break;
762 DELAY(10);
763 }
764
765 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS);
766
767 /*
768 * First, initilize CSRs with undefined value to default settings.
769 */
770 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
771 val |= OHCI_BusOptions_ISC | OHCI_BusOptions_CMC;
772 #if 0
773 val |= OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC;
774 #else
775 val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC);
776 #endif
777 OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val);
778 for (i = 0; i < sc->sc_isoctx; i++) {
779 OHCI_SYNC_RX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
780 ~0);
781 }
782 for (i = 0; i < sc->sc_itctx; i++) {
783 OHCI_SYNC_TX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
784 ~0);
785 }
786 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear, ~0);
787
788 fwohci_configrom_init(sc);
789 fwohci_selfid_init(sc);
790 fwohci_buf_init_rx(sc);
791 fwohci_csr_init(sc);
792
793 /*
794 * Final CSR settings.
795 */
796 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet,
797 OHCI_LinkControl_CycleTimerEnable |
798 OHCI_LinkControl_RcvSelfID | OHCI_LinkControl_RcvPhyPkt);
799
800 OHCI_CSR_WRITE(sc, OHCI_REG_ATRetries, 0x00000888); /*XXX*/
801
802 /* clear receive filter */
803 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiClear, ~0);
804 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoClear, ~0);
805 OHCI_CSR_WRITE(sc, OHCI_REG_AsynchronousRequestFilterHiSet, 0x80000000);
806
807 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear,
808 OHCI_HCControl_NoByteSwapData | OHCI_HCControl_APhyEnhanceEnable);
809 #if BYTE_ORDER == BIG_ENDIAN
810 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet,
811 OHCI_HCControl_NoByteSwapData);
812 #endif
813
814 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, ~0);
815 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset |
816 OHCI_Int_SelfIDComplete | OHCI_Int_IsochRx | OHCI_Int_IsochTx |
817 OHCI_Int_RSPkt | OHCI_Int_RQPkt | OHCI_Int_ARRS | OHCI_Int_ARRQ |
818 OHCI_Int_RespTxComplete | OHCI_Int_ReqTxComplete);
819 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_CycleTooLong |
820 OHCI_Int_UnrecoverableError | OHCI_Int_CycleInconsistent |
821 OHCI_Int_LockRespErr | OHCI_Int_PostedWriteErr);
822 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
823 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0);
824 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_MasterEnable);
825
826 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LinkEnable);
827
828 /*
829 * Start the receivers
830 */
831 fwohci_buf_start_rx(sc);
832 }
833
834 static void
835 fwohci_power(int why, void *arg)
836 {
837 struct fwohci_softc *sc = arg;
838 int s;
839
840 s = splbio();
841 switch (why) {
842 case PWR_SUSPEND:
843 case PWR_STANDBY:
844 fwohci_shutdown(sc);
845 break;
846 case PWR_RESUME:
847 fwohci_hw_init(sc);
848 fwohci_phy_busreset(sc);
849 break;
850 case PWR_SOFTSUSPEND:
851 case PWR_SOFTSTANDBY:
852 case PWR_SOFTRESUME:
853 break;
854 }
855 splx(s);
856 }
857
858 static void
859 fwohci_shutdown(void *arg)
860 {
861 struct fwohci_softc *sc = arg;
862 u_int32_t val;
863
864 callout_stop(&sc->sc_selfid_callout);
865 /* disable all interrupt */
866 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, OHCI_Int_MasterEnable);
867 fwohci_buf_stop_tx(sc);
868 fwohci_buf_stop_rx(sc);
869 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
870 val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_ISC |
871 OHCI_BusOptions_CMC | OHCI_BusOptions_IRMC);
872 OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val);
873 fwohci_phy_busreset(sc);
874 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LinkEnable);
875 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LPS);
876 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset);
877 }
878
879 /*
880 * COMMON FUNCTIONS
881 */
882
883 /*
884 * read the PHY Register.
885 */
886 static u_int8_t
887 fwohci_phy_read(struct fwohci_softc *sc, u_int8_t reg)
888 {
889 int i;
890 u_int32_t val;
891
892 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl,
893 OHCI_PhyControl_RdReg | (reg << OHCI_PhyControl_RegAddr_BITPOS));
894 for (i = 0; i < OHCI_LOOP; i++) {
895 if (OHCI_CSR_READ(sc, OHCI_REG_PhyControl) &
896 OHCI_PhyControl_RdDone)
897 break;
898 DELAY(10);
899 }
900 val = OHCI_CSR_READ(sc, OHCI_REG_PhyControl);
901 return (val & OHCI_PhyControl_RdData) >> OHCI_PhyControl_RdData_BITPOS;
902 }
903
904 /*
905 * write the PHY Register.
906 */
907 static void
908 fwohci_phy_write(struct fwohci_softc *sc, u_int8_t reg, u_int8_t val)
909 {
910 int i;
911
912 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl, OHCI_PhyControl_WrReg |
913 (reg << OHCI_PhyControl_RegAddr_BITPOS) |
914 (val << OHCI_PhyControl_WrData_BITPOS));
915 for (i = 0; i < OHCI_LOOP; i++) {
916 if (!(OHCI_CSR_READ(sc, OHCI_REG_PhyControl) &
917 OHCI_PhyControl_WrReg))
918 break;
919 DELAY(10);
920 }
921 }
922
923 /*
924 * Initiate Bus Reset
925 */
926 static void
927 fwohci_phy_busreset(struct fwohci_softc *sc)
928 {
929 int s;
930 u_int8_t val;
931
932 s = splbio();
933 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
934 OHCI_Int_BusReset | OHCI_Int_SelfIDComplete);
935 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset);
936 callout_stop(&sc->sc_selfid_callout);
937 val = fwohci_phy_read(sc, 1);
938 val = (val & 0x80) | /* preserve RHB (force root) */
939 0x40 | /* Initiate Bus Reset */
940 0x3f; /* default GAP count */
941 fwohci_phy_write(sc, 1, val);
942 splx(s);
943 }
944
945 /*
946 * PHY Packet
947 */
948 static void
949 fwohci_phy_input(struct fwohci_softc *sc, struct fwohci_pkt *pkt)
950 {
951 u_int32_t val;
952
953 val = pkt->fp_hdr[1];
954 if (val != ~pkt->fp_hdr[2]) {
955 if (val == 0 && ((*pkt->fp_trail & 0x001f0000) >> 16) ==
956 OHCI_CTXCTL_EVENT_BUS_RESET) {
957 DPRINTFN(1, ("fwohci_phy_input: BusReset: 0x%08x\n",
958 pkt->fp_hdr[2]));
959 } else {
960 printf("%s: phy packet corrupted (0x%08x, 0x%08x)\n",
961 sc->sc_sc1394.sc1394_dev.dv_xname, val,
962 pkt->fp_hdr[2]);
963 }
964 return;
965 }
966 #ifdef FW_DEBUG
967 if (fwdebug > 1)
968 fwohci_show_phypkt(sc, val);
969 #endif
970 }
971
972 /*
973 * Descriptor for context DMA.
974 */
975 static int
976 fwohci_desc_alloc(struct fwohci_softc *sc)
977 {
978 int error, mapsize, dsize;
979
980 /*
981 * allocate descriptor buffer
982 */
983
984 sc->sc_descsize = OHCI_BUF_ARRQ_CNT + OHCI_BUF_ARRS_CNT +
985 OHCI_BUF_ATRQ_CNT + OHCI_BUF_ATRS_CNT +
986 OHCI_BUF_IR_CNT * sc->sc_isoctx + 2;
987 dsize = sizeof(struct fwohci_desc) * sc->sc_descsize;
988 mapsize = howmany(sc->sc_descsize, NBBY);
989 sc->sc_descmap = malloc(mapsize, M_DEVBUF, M_WAITOK|M_ZERO);
990
991 if (sc->sc_descmap == NULL) {
992 printf("fwohci_desc_alloc: cannot get memory\n");
993 return -1;
994 }
995
996 if ((error = bus_dmamem_alloc(sc->sc_dmat, dsize, PAGE_SIZE, 0,
997 &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) {
998 printf("%s: unable to allocate descriptor buffer, error = %d\n",
999 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1000 goto fail_0;
1001 }
1002
1003 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg,
1004 dsize, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT | BUS_DMA_WAITOK))
1005 != 0) {
1006 printf("%s: unable to map descriptor buffer, error = %d\n",
1007 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1008 goto fail_1;
1009 }
1010
1011 if ((error = bus_dmamap_create(sc->sc_dmat, dsize, sc->sc_dnseg,
1012 dsize, 0, BUS_DMA_WAITOK, &sc->sc_ddmamap)) != 0) {
1013 printf("%s: unable to create descriptor buffer DMA map, "
1014 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error);
1015 goto fail_2;
1016 }
1017
1018 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc,
1019 dsize, NULL, BUS_DMA_WAITOK)) != 0) {
1020 printf("%s: unable to load descriptor buffer DMA map, "
1021 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error);
1022 goto fail_3;
1023 }
1024
1025 return 0;
1026
1027 fail_3:
1028 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1029 fail_2:
1030 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, dsize);
1031 fail_1:
1032 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1033 fail_0:
1034 return error;
1035 }
1036
1037 static struct fwohci_desc *
1038 fwohci_desc_get(struct fwohci_softc *sc, int ndesc)
1039 {
1040 int i, n;
1041
1042 for (n = 0; n <= sc->sc_descsize - ndesc; n++) {
1043 for (i = 0; ; i++) {
1044 if (i == ndesc) {
1045 for (i = 0; i < ndesc; i++)
1046 setbit(sc->sc_descmap, n + i);
1047 return sc->sc_desc + n;
1048 }
1049 if (isset(sc->sc_descmap, n + i))
1050 break;
1051 }
1052 }
1053 return NULL;
1054 }
1055
1056 static void
1057 fwohci_desc_put(struct fwohci_softc *sc, struct fwohci_desc *fd, int ndesc)
1058 {
1059 int i, n;
1060
1061 n = fd - sc->sc_desc;
1062 for (i = 0; i < ndesc; i++, n++) {
1063 #ifdef DIAGNOSTIC
1064 if (isclr(sc->sc_descmap, n))
1065 panic("fwohci_desc_put: duplicated free");
1066 #endif
1067 clrbit(sc->sc_descmap, n);
1068 }
1069 }
1070
1071 /*
1072 * Asynchronous/Isochronous Transmit/Receive Context
1073 */
1074 static int
1075 fwohci_ctx_alloc(struct fwohci_softc *sc, struct fwohci_ctx **fcp,
1076 int bufcnt, int ctx, int ctxtype)
1077 {
1078 int i, error;
1079 struct fwohci_ctx *fc;
1080 struct fwohci_buf *fb;
1081 struct fwohci_desc *fd;
1082 #if DOUBLEBUF
1083 int buf2cnt;
1084 #endif
1085
1086 fc = malloc(sizeof(*fc), M_DEVBUF, M_WAITOK|M_ZERO);
1087 LIST_INIT(&fc->fc_handler);
1088 TAILQ_INIT(&fc->fc_buf);
1089 fc->fc_ctx = ctx;
1090 fc->fc_buffers = fb = malloc(sizeof(*fb) * bufcnt, M_DEVBUF, M_WAITOK|M_ZERO);
1091 fc->fc_bufcnt = bufcnt;
1092 #if DOUBLEBUF
1093 TAILQ_INIT(&fc->fc_buf2); /* for isochronous */
1094 if (ctxtype == FWOHCI_CTX_ISO_MULTI) {
1095 buf2cnt = bufcnt/2;
1096 bufcnt -= buf2cnt;
1097 if (buf2cnt == 0) {
1098 panic("cannot allocate iso buffer");
1099 }
1100 }
1101 #endif
1102 for (i = 0; i < bufcnt; i++, fb++) {
1103 if ((error = fwohci_buf_alloc(sc, fb)) != 0)
1104 goto fail;
1105 if ((fd = fwohci_desc_get(sc, 1)) == NULL) {
1106 error = ENOBUFS;
1107 goto fail;
1108 }
1109 fb->fb_desc = fd;
1110 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
1111 ((caddr_t)fd - (caddr_t)sc->sc_desc);
1112 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS |
1113 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH;
1114 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len;
1115 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr;
1116 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
1117 }
1118 #if DOUBLEBUF
1119 if (ctxtype == FWOHCI_CTX_ISO_MULTI) {
1120 for (i = bufcnt; i < bufcnt + buf2cnt; i++, fb++) {
1121 if ((error = fwohci_buf_alloc(sc, fb)) != 0)
1122 goto fail;
1123 if ((fd = fwohci_desc_get(sc, 1)) == NULL) {
1124 error = ENOBUFS;
1125 goto fail;
1126 }
1127 fb->fb_desc = fd;
1128 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
1129 ((caddr_t)fd - (caddr_t)sc->sc_desc);
1130 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1131 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1132 BUS_DMASYNC_PREWRITE);
1133 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS |
1134 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH;
1135 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len;
1136 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr;
1137 TAILQ_INSERT_TAIL(&fc->fc_buf2, fb, fb_list);
1138 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1139 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1140 BUS_DMASYNC_POSTWRITE);
1141 }
1142 }
1143 #endif /* DOUBLEBUF */
1144 fc->fc_type = ctxtype;
1145 *fcp = fc;
1146 return 0;
1147
1148 fail:
1149 while (i-- > 0) {
1150 fb--;
1151 if (fb->fb_desc)
1152 fwohci_desc_put(sc, fb->fb_desc, 1);
1153 fwohci_buf_free(sc, fb);
1154 }
1155 free(fc, M_DEVBUF);
1156 return error;
1157 }
1158
1159 static void
1160 fwohci_ctx_free(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1161 {
1162 struct fwohci_buf *fb;
1163 struct fwohci_handler *fh;
1164
1165 #if DOUBLEBUF
1166 if ((fc->fc_type == FWOHCI_CTX_ISO_MULTI) &&
1167 (TAILQ_FIRST(&fc->fc_buf) > TAILQ_FIRST(&fc->fc_buf2))) {
1168 struct fwohci_buf_s fctmp;
1169
1170 fctmp = fc->fc_buf;
1171 fc->fc_buf = fc->fc_buf2;
1172 fc->fc_buf2 = fctmp;
1173 }
1174 #endif
1175 while ((fh = LIST_FIRST(&fc->fc_handler)) != NULL)
1176 fwohci_handler_set(sc, fh->fh_tcode, fh->fh_key1, fh->fh_key2,
1177 fh->fh_key3, NULL, NULL);
1178 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
1179 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
1180 if (fb->fb_desc)
1181 fwohci_desc_put(sc, fb->fb_desc, 1);
1182 fwohci_buf_free(sc, fb);
1183 }
1184 #if DOUBLEBUF
1185 while ((fb = TAILQ_FIRST(&fc->fc_buf2)) != NULL) {
1186 TAILQ_REMOVE(&fc->fc_buf2, fb, fb_list);
1187 if (fb->fb_desc)
1188 fwohci_desc_put(sc, fb->fb_desc, 1);
1189 fwohci_buf_free(sc, fb);
1190 }
1191 #endif /* DOUBLEBUF */
1192 free(fc->fc_buffers, M_DEVBUF);
1193 free(fc, M_DEVBUF);
1194 }
1195
1196 static void
1197 fwohci_ctx_init(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1198 {
1199 struct fwohci_buf *fb, *nfb;
1200 struct fwohci_desc *fd;
1201 struct fwohci_handler *fh;
1202 int n;
1203
1204 for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL; fb = nfb) {
1205 nfb = TAILQ_NEXT(fb, fb_list);
1206 fb->fb_off = 0;
1207 fd = fb->fb_desc;
1208 fd->fd_branch = (nfb != NULL) ? (nfb->fb_daddr | 1) : 0;
1209 fd->fd_rescount = fd->fd_reqcount;
1210 }
1211
1212 #if DOUBLEBUF
1213 for (fb = TAILQ_FIRST(&fc->fc_buf2); fb != NULL; fb = nfb) {
1214 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1215 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1216 BUS_DMASYNC_PREWRITE);
1217 nfb = TAILQ_NEXT(fb, fb_list);
1218 fb->fb_off = 0;
1219 fd = fb->fb_desc;
1220 fd->fd_branch = (nfb != NULL) ? (nfb->fb_daddr | 1) : 0;
1221 fd->fd_rescount = fd->fd_reqcount;
1222 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1223 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1224 BUS_DMASYNC_POSTWRITE);
1225 }
1226 #endif /* DOUBLEBUF */
1227
1228 n = fc->fc_ctx;
1229 fb = TAILQ_FIRST(&fc->fc_buf);
1230 if (fc->fc_type != FWOHCI_CTX_ASYNC) {
1231 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr,
1232 fb->fb_daddr | 1);
1233 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear,
1234 OHCI_CTXCTL_RX_BUFFER_FILL |
1235 OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE |
1236 OHCI_CTXCTL_RX_MULTI_CHAN_MODE |
1237 OHCI_CTXCTL_RX_DUAL_BUFFER_MODE);
1238 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet,
1239 OHCI_CTXCTL_RX_ISOCH_HEADER);
1240 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
1241 OHCI_SYNC_RX_DMA_WRITE(sc, n,
1242 OHCI_SUBREG_ContextControlSet,
1243 OHCI_CTXCTL_RX_BUFFER_FILL);
1244 }
1245 fh = LIST_FIRST(&fc->fc_handler);
1246
1247 if (fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) {
1248 OHCI_SYNC_RX_DMA_WRITE(sc, n,
1249 OHCI_SUBREG_ContextControlSet,
1250 OHCI_CTXCTL_RX_MULTI_CHAN_MODE);
1251
1252 /* Receive all the isochronous channels */
1253 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet,
1254 0xffffffff);
1255 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet,
1256 0xffffffff);
1257 DPRINTF(("%s: CTXCTL 0x%08x\n",
1258 sc->sc_sc1394.sc1394_dev.dv_xname,
1259 OHCI_SYNC_RX_DMA_READ(sc, n,
1260 OHCI_SUBREG_ContextControlSet)));
1261 }
1262 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch,
1263 (fh->fh_key2 << OHCI_CTXMATCH_TAG_BITPOS) |
1264 (fh->fh_key1 & IEEE1394_ISO_CHANNEL_MASK));
1265 } else {
1266 OHCI_ASYNC_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr,
1267 fb->fb_daddr | 1);
1268 }
1269 }
1270
1271 /*
1272 * DMA data buffer
1273 */
1274 static int
1275 fwohci_buf_alloc(struct fwohci_softc *sc, struct fwohci_buf *fb)
1276 {
1277 int error;
1278
1279 if ((error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1280 PAGE_SIZE, &fb->fb_seg, 1, &fb->fb_nseg, BUS_DMA_WAITOK)) != 0) {
1281 printf("%s: unable to allocate buffer, error = %d\n",
1282 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1283 goto fail_0;
1284 }
1285
1286 if ((error = bus_dmamem_map(sc->sc_dmat, &fb->fb_seg,
1287 fb->fb_nseg, PAGE_SIZE, &fb->fb_buf, BUS_DMA_WAITOK)) != 0) {
1288 printf("%s: unable to map buffer, error = %d\n",
1289 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1290 goto fail_1;
1291 }
1292
1293 if ((error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, fb->fb_nseg,
1294 PAGE_SIZE, 0, BUS_DMA_WAITOK, &fb->fb_dmamap)) != 0) {
1295 printf("%s: unable to create buffer DMA map, "
1296 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname,
1297 error);
1298 goto fail_2;
1299 }
1300
1301 if ((error = bus_dmamap_load(sc->sc_dmat, fb->fb_dmamap,
1302 fb->fb_buf, PAGE_SIZE, NULL, BUS_DMA_WAITOK)) != 0) {
1303 printf("%s: unable to load buffer DMA map, "
1304 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname,
1305 error);
1306 goto fail_3;
1307 }
1308
1309 return 0;
1310
1311 bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap);
1312 fail_3:
1313 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
1314 fail_2:
1315 bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE);
1316 fail_1:
1317 bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg);
1318 fail_0:
1319 return error;
1320 }
1321
1322 static void
1323 fwohci_buf_free(struct fwohci_softc *sc, struct fwohci_buf *fb)
1324 {
1325
1326 bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap);
1327 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
1328 bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE);
1329 bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg);
1330 }
1331
1332 static void
1333 fwohci_buf_init_rx(struct fwohci_softc *sc)
1334 {
1335 int i;
1336
1337 /*
1338 * Initialize for Asynchronous Receive Queue.
1339 */
1340 fwohci_ctx_init(sc, sc->sc_ctx_arrq);
1341 fwohci_ctx_init(sc, sc->sc_ctx_arrs);
1342
1343 /*
1344 * Initialize for Isochronous Receive Queue.
1345 */
1346 if (sc->sc_ctx_as != NULL) {
1347 for (i = 0; i < sc->sc_isoctx; i++) {
1348 if (sc->sc_ctx_as[i] != NULL)
1349 fwohci_ctx_init(sc, sc->sc_ctx_as[i]);
1350 }
1351 }
1352 }
1353
1354 static void
1355 fwohci_buf_start_rx(struct fwohci_softc *sc)
1356 {
1357 int i;
1358
1359 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST,
1360 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1361 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
1362 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1363 if (sc->sc_ctx_as != NULL) {
1364 for (i = 0; i < sc->sc_isoctx; i++) {
1365 if (sc->sc_ctx_as[i] != NULL)
1366 OHCI_SYNC_RX_DMA_WRITE(sc, i,
1367 OHCI_SUBREG_ContextControlSet,
1368 OHCI_CTXCTL_RUN);
1369 }
1370 }
1371 }
1372
1373 static void
1374 fwohci_buf_stop_tx(struct fwohci_softc *sc)
1375 {
1376 int i;
1377
1378 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_REQUEST,
1379 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1380 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
1381 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1382
1383 /*
1384 * Make sure the transmitter is stopped.
1385 */
1386 for (i = 0; i < OHCI_LOOP; i++) {
1387 DELAY(10);
1388 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST,
1389 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)
1390 continue;
1391 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
1392 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)
1393 continue;
1394 break;
1395 }
1396
1397 /*
1398 * Initialize for Asynchronous Transmit Queue.
1399 */
1400 fwohci_at_done(sc, sc->sc_ctx_atrq, 1);
1401 fwohci_at_done(sc, sc->sc_ctx_atrs, 1);
1402 }
1403
1404 static void
1405 fwohci_buf_stop_rx(struct fwohci_softc *sc)
1406 {
1407 int i;
1408
1409 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST,
1410 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1411 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
1412 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1413 for (i = 0; i < sc->sc_isoctx; i++) {
1414 OHCI_SYNC_RX_DMA_WRITE(sc, i,
1415 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1416 }
1417 }
1418
1419 static void
1420 fwohci_buf_next(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1421 {
1422 struct fwohci_buf *fb, *tfb;
1423
1424 #if DOUBLEBUF
1425 if (fc->fc_type != FWOHCI_CTX_ISO_MULTI) {
1426 #endif
1427 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
1428 if (fc->fc_type) {
1429 if (fb->fb_off == 0)
1430 break;
1431 } else {
1432 if (fb->fb_off != fb->fb_desc->fd_reqcount ||
1433 fb->fb_desc->fd_rescount != 0)
1434 break;
1435 }
1436 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
1437 fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount;
1438 fb->fb_off = 0;
1439 fb->fb_desc->fd_branch = 0;
1440 tfb = TAILQ_LAST(&fc->fc_buf, fwohci_buf_s);
1441 tfb->fb_desc->fd_branch = fb->fb_daddr | 1;
1442 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
1443 }
1444 #if DOUBLEBUF
1445 } else {
1446 struct fwohci_buf_s fctmp;
1447
1448 /* cleaning buffer */
1449 for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL;
1450 fb = TAILQ_NEXT(fb, fb_list)) {
1451 fb->fb_off = 0;
1452 fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount;
1453 }
1454
1455 /* rotating buffer */
1456 fctmp = fc->fc_buf;
1457 fc->fc_buf = fc->fc_buf2;
1458 fc->fc_buf2 = fctmp;
1459 }
1460 #endif
1461 }
1462
1463 static int
1464 fwohci_buf_pktget(struct fwohci_softc *sc, struct fwohci_buf **fbp, caddr_t *pp,
1465 int len)
1466 {
1467 struct fwohci_buf *fb;
1468 struct fwohci_desc *fd;
1469 int bufend;
1470
1471 fb = *fbp;
1472 again:
1473 fd = fb->fb_desc;
1474 DPRINTFN(1, ("fwohci_buf_pktget: desc %ld, off %d, req %d, res %d,"
1475 " len %d, avail %d\n", (long)(fd - sc->sc_desc), fb->fb_off,
1476 fd->fd_reqcount, fd->fd_rescount, len,
1477 fd->fd_reqcount - fd->fd_rescount - fb->fb_off));
1478 bufend = fd->fd_reqcount - fd->fd_rescount;
1479 if (fb->fb_off >= bufend) {
1480 DPRINTFN(5, ("buf %x finish req %d res %d off %d ",
1481 fb->fb_desc->fd_data, fd->fd_reqcount, fd->fd_rescount,
1482 fb->fb_off));
1483 if (fd->fd_rescount == 0) {
1484 *fbp = fb = TAILQ_NEXT(fb, fb_list);
1485 if (fb != NULL)
1486 goto again;
1487 }
1488 return 0;
1489 }
1490 if (fb->fb_off + len > bufend)
1491 len = bufend - fb->fb_off;
1492 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len,
1493 BUS_DMASYNC_POSTREAD);
1494 *pp = fb->fb_buf + fb->fb_off;
1495 fb->fb_off += roundup(len, 4);
1496 return len;
1497 }
1498
1499 static int
1500 fwohci_buf_input(struct fwohci_softc *sc, struct fwohci_ctx *fc,
1501 struct fwohci_pkt *pkt)
1502 {
1503 caddr_t p;
1504 struct fwohci_buf *fb;
1505 int len, count, i;
1506 #ifdef FW_DEBUG
1507 int tlabel;
1508 #endif
1509
1510 memset(pkt, 0, sizeof(*pkt));
1511 pkt->fp_uio.uio_iov = pkt->fp_iov;
1512 pkt->fp_uio.uio_rw = UIO_WRITE;
1513 pkt->fp_uio.uio_segflg = UIO_SYSSPACE;
1514
1515 /* get first quadlet */
1516 fb = TAILQ_FIRST(&fc->fc_buf);
1517 count = 4;
1518 len = fwohci_buf_pktget(sc, &fb, &p, count);
1519 if (len <= 0) {
1520 DPRINTFN(1, ("fwohci_buf_input: no input for %d\n",
1521 fc->fc_ctx));
1522 return 0;
1523 }
1524 pkt->fp_hdr[0] = *(u_int32_t *)p;
1525 pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4;
1526 switch (pkt->fp_tcode) {
1527 case IEEE1394_TCODE_WRITE_REQ_QUAD:
1528 case IEEE1394_TCODE_READ_RESP_QUAD:
1529 pkt->fp_hlen = 12;
1530 pkt->fp_dlen = 4;
1531 break;
1532 case IEEE1394_TCODE_READ_REQ_BLOCK:
1533 pkt->fp_hlen = 16;
1534 pkt->fp_dlen = 0;
1535 break;
1536 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
1537 case IEEE1394_TCODE_READ_RESP_BLOCK:
1538 case IEEE1394_TCODE_LOCK_REQ:
1539 case IEEE1394_TCODE_LOCK_RESP:
1540 pkt->fp_hlen = 16;
1541 break;
1542 case IEEE1394_TCODE_STREAM_DATA:
1543 #ifdef DIAGNOSTIC
1544 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI)
1545 #endif
1546 {
1547 pkt->fp_hlen = 4;
1548 pkt->fp_dlen = pkt->fp_hdr[0] >> 16;
1549 DPRINTFN(5, ("[%d]", pkt->fp_dlen));
1550 break;
1551 }
1552 #ifdef DIAGNOSTIC
1553 else {
1554 printf("fwohci_buf_input: bad tcode: STREAM_DATA\n");
1555 return 0;
1556 }
1557 #endif
1558 default:
1559 pkt->fp_hlen = 12;
1560 pkt->fp_dlen = 0;
1561 break;
1562 }
1563
1564 /* get header */
1565 while (count < pkt->fp_hlen) {
1566 len = fwohci_buf_pktget(sc, &fb, &p, pkt->fp_hlen - count);
1567 if (len == 0) {
1568 printf("fwohci_buf_input: malformed input 1: %d\n",
1569 pkt->fp_hlen - count);
1570 return 0;
1571 }
1572 memcpy((caddr_t)pkt->fp_hdr + count, p, len);
1573 count += len;
1574 }
1575 if (pkt->fp_hlen == 16 &&
1576 pkt->fp_tcode != IEEE1394_TCODE_READ_REQ_BLOCK)
1577 pkt->fp_dlen = pkt->fp_hdr[3] >> 16;
1578 #ifdef FW_DEBUG
1579 tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10;
1580 #endif
1581 DPRINTFN(1, ("fwohci_buf_input: tcode=0x%x, tlabel=0x%x, hlen=%d, "
1582 "dlen=%d\n", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen));
1583
1584 /* get data */
1585 count = 0;
1586 i = 0;
1587 while (count < pkt->fp_dlen) {
1588 len = fwohci_buf_pktget(sc, &fb,
1589 (caddr_t *)&pkt->fp_iov[i].iov_base,
1590 pkt->fp_dlen - count);
1591 if (len == 0) {
1592 printf("fwohci_buf_input: malformed input 2: %d\n",
1593 pkt->fp_dlen - count);
1594 return 0;
1595 }
1596 pkt->fp_iov[i++].iov_len = len;
1597 count += len;
1598 }
1599 pkt->fp_uio.uio_iovcnt = i;
1600 pkt->fp_uio.uio_resid = count;
1601
1602 /* get trailer */
1603 len = fwohci_buf_pktget(sc, &fb, (caddr_t *)&pkt->fp_trail,
1604 sizeof(*pkt->fp_trail));
1605 if (len <= 0) {
1606 printf("fwohci_buf_input: malformed input 3: %d\n",
1607 pkt->fp_hlen - count);
1608 return 0;
1609 }
1610 return 1;
1611 }
1612
1613 static int
1614 fwohci_buf_input_ppb(struct fwohci_softc *sc, struct fwohci_ctx *fc,
1615 struct fwohci_pkt *pkt)
1616 {
1617 caddr_t p;
1618 int len;
1619 struct fwohci_buf *fb;
1620 struct fwohci_desc *fd;
1621
1622 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
1623 return fwohci_buf_input(sc, fc, pkt);
1624 }
1625
1626 memset(pkt, 0, sizeof(*pkt));
1627 pkt->fp_uio.uio_iov = pkt->fp_iov;
1628 pkt->fp_uio.uio_rw = UIO_WRITE;
1629 pkt->fp_uio.uio_segflg = UIO_SYSSPACE;
1630
1631 for (fb = TAILQ_FIRST(&fc->fc_buf); ; fb = TAILQ_NEXT(fb, fb_list)) {
1632 if (fb == NULL)
1633 return 0;
1634 if (fb->fb_off == 0)
1635 break;
1636 }
1637 fd = fb->fb_desc;
1638 len = fd->fd_reqcount - fd->fd_rescount;
1639 if (len == 0)
1640 return 0;
1641 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len,
1642 BUS_DMASYNC_POSTREAD);
1643
1644 p = fb->fb_buf;
1645 fb->fb_off += roundup(len, 4);
1646 if (len < 8) {
1647 printf("fwohci_buf_input_ppb: malformed input 1: %d\n", len);
1648 return 0;
1649 }
1650
1651 /*
1652 * get trailer first, may be bogus data unless status update
1653 * in descriptor is set.
1654 */
1655 pkt->fp_trail = (u_int32_t *)p;
1656 *pkt->fp_trail = (*pkt->fp_trail & 0xffff) | (fd->fd_status << 16);
1657 pkt->fp_hdr[0] = ((u_int32_t *)p)[1];
1658 pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4;
1659 #ifdef DIAGNOSTIC
1660 if (pkt->fp_tcode != IEEE1394_TCODE_STREAM_DATA) {
1661 printf("fwohci_buf_input_ppb: bad tcode: 0x%x\n",
1662 pkt->fp_tcode);
1663 return 0;
1664 }
1665 #endif
1666 pkt->fp_hlen = 4;
1667 pkt->fp_dlen = pkt->fp_hdr[0] >> 16;
1668 p += 8;
1669 len -= 8;
1670 if (pkt->fp_dlen != len) {
1671 printf("fwohci_buf_input_ppb: malformed input 2: %d != %d\n",
1672 pkt->fp_dlen, len);
1673 return 0;
1674 }
1675 DPRINTFN(1, ("fwohci_buf_input_ppb: tcode=0x%x, hlen=%d, dlen=%d\n",
1676 pkt->fp_tcode, pkt->fp_hlen, pkt->fp_dlen));
1677 pkt->fp_iov[0].iov_base = p;
1678 pkt->fp_iov[0].iov_len = len;
1679 pkt->fp_uio.uio_iovcnt = 0;
1680 pkt->fp_uio.uio_resid = len;
1681 return 1;
1682 }
1683
1684 static int
1685 fwohci_handler_set(struct fwohci_softc *sc,
1686 int tcode, u_int32_t key1, u_int32_t key2, u_int32_t key3,
1687 int (*handler)(struct fwohci_softc *, void *, struct fwohci_pkt *),
1688 void *arg)
1689 {
1690 struct fwohci_ctx *fc;
1691 struct fwohci_handler *fh;
1692 u_int64_t addr, naddr;
1693 u_int32_t off;
1694 int i, j;
1695
1696 if (tcode == IEEE1394_TCODE_STREAM_DATA &&
1697 (((key1 & OHCI_ASYNC_STREAM) && sc->sc_ctx_as != NULL)
1698 || (key1 & OHCI_ASYNC_STREAM) == 0)) {
1699 int isasync = key1 & OHCI_ASYNC_STREAM;
1700
1701 key1 = key1 & IEEE1394_ISO_CHANNEL_ANY ?
1702 IEEE1394_ISO_CHANNEL_ANY : (key1 & IEEE1394_ISOCH_MASK);
1703 if (key1 & IEEE1394_ISO_CHANNEL_ANY) {
1704 printf("%s: key changed to %x\n",
1705 sc->sc_sc1394.sc1394_dev.dv_xname, key1);
1706 }
1707 j = sc->sc_isoctx;
1708 fh = NULL;
1709
1710 for (i = 0; i < sc->sc_isoctx; i++) {
1711 if ((fc = sc->sc_ctx_as[i]) == NULL) {
1712 if (j == sc->sc_isoctx)
1713 j = i;
1714 continue;
1715 }
1716 fh = LIST_FIRST(&fc->fc_handler);
1717 if (fh->fh_tcode == tcode &&
1718 fh->fh_key1 == key1 && fh->fh_key2 == key2)
1719 break;
1720 fh = NULL;
1721 }
1722 if (fh == NULL) {
1723 if (handler == NULL)
1724 return 0;
1725 if (j == sc->sc_isoctx) {
1726 DPRINTF(("fwohci_handler_set: no more free "
1727 "context\n"));
1728 return ENOMEM;
1729 }
1730 if ((fc = sc->sc_ctx_as[j]) == NULL) {
1731 fwohci_ctx_alloc(sc, &fc, OHCI_BUF_IR_CNT, j,
1732 isasync ? FWOHCI_CTX_ISO_SINGLE :
1733 FWOHCI_CTX_ISO_MULTI);
1734 sc->sc_ctx_as[j] = fc;
1735 }
1736 }
1737 #ifdef FW_DEBUG
1738 if (fh == NULL && handler != NULL) {
1739 printf("use ir context %d\n", j);
1740 } else if (fh != NULL && handler == NULL) {
1741 printf("remove ir context %d\n", i);
1742 }
1743 #endif
1744 } else {
1745 switch (tcode) {
1746 case IEEE1394_TCODE_WRITE_REQ_QUAD:
1747 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
1748 case IEEE1394_TCODE_READ_REQ_QUAD:
1749 case IEEE1394_TCODE_READ_REQ_BLOCK:
1750 case IEEE1394_TCODE_LOCK_REQ:
1751 fc = sc->sc_ctx_arrq;
1752 break;
1753 case IEEE1394_TCODE_WRITE_RESP:
1754 case IEEE1394_TCODE_READ_RESP_QUAD:
1755 case IEEE1394_TCODE_READ_RESP_BLOCK:
1756 case IEEE1394_TCODE_LOCK_RESP:
1757 fc = sc->sc_ctx_arrs;
1758 break;
1759 default:
1760 return EIO;
1761 }
1762 naddr = ((u_int64_t)key1 << 32) + key2;
1763
1764 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
1765 fh = LIST_NEXT(fh, fh_list)) {
1766 if (fh->fh_tcode == tcode) {
1767 if (fh->fh_key1 == key1 &&
1768 fh->fh_key2 == key2 && fh->fh_key3 == key3)
1769 break;
1770 /* Make sure it's not within a current range. */
1771 addr = ((u_int64_t)fh->fh_key1 << 32) +
1772 fh->fh_key2;
1773 off = fh->fh_key3;
1774 if (key3 &&
1775 (((naddr >= addr) &&
1776 (naddr < (addr + off))) ||
1777 (((naddr + key3) > addr) &&
1778 ((naddr + key3) <= (addr + off))) ||
1779 ((addr > naddr) &&
1780 (addr < (naddr + key3)))))
1781 if (handler)
1782 return EEXIST;
1783 }
1784 }
1785 }
1786 if (handler == NULL) {
1787 if (fh != NULL) {
1788 LIST_REMOVE(fh, fh_list);
1789 free(fh, M_DEVBUF);
1790 }
1791 if (tcode == IEEE1394_TCODE_STREAM_DATA) {
1792 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
1793 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1794 sc->sc_ctx_as[fc->fc_ctx] = NULL;
1795 fwohci_ctx_free(sc, fc);
1796 }
1797 return 0;
1798 }
1799 if (fh == NULL) {
1800 fh = malloc(sizeof(*fh), M_DEVBUF, M_WAITOK);
1801 LIST_INSERT_HEAD(&fc->fc_handler, fh, fh_list);
1802 }
1803 fh->fh_tcode = tcode;
1804 fh->fh_key1 = key1;
1805 fh->fh_key2 = key2;
1806 fh->fh_key3 = key3;
1807 fh->fh_handler = handler;
1808 fh->fh_handarg = arg;
1809 DPRINTFN(1, ("fwohci_handler_set: ctx %d, tcode %x, key 0x%x, 0x%x, "
1810 "0x%x\n", fc->fc_ctx, tcode, key1, key2, key3));
1811
1812 if (tcode == IEEE1394_TCODE_STREAM_DATA) {
1813 fwohci_ctx_init(sc, fc);
1814 DPRINTFN(1, ("fwohci_handler_set: SYNC desc %ld\n",
1815 (long)(TAILQ_FIRST(&fc->fc_buf)->fb_desc - sc->sc_desc)));
1816 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
1817 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1818 }
1819 return 0;
1820 }
1821
1822 /*
1823 * static ieee1394_ir_tag_t
1824 * fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
1825 * int bufnum, int maxsize, int flags)
1826 *
1827 * This function will return non-negative value if it succeeds.
1828 * This return value is pointer to the context of isochronous
1829 * transmission. This function will return NULL value if it
1830 * fails.
1831 */
1832 ieee1394_ir_tag_t
1833 fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
1834 int bufnum, int maxsize, int flags)
1835 {
1836 int i, openctx;
1837 struct fwohci_ir_ctx *irc;
1838 struct fwohci_softc *sc = (struct fwohci_softc *)dev;
1839 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
1840
1841 printf("%s: ir_ctx_set channel %d tagbm 0x%x maxsize %d bufnum %d\n",
1842 xname, channel, tagbm, maxsize, bufnum);
1843 /*
1844 * This loop will find the smallest vacant context and check
1845 * whether other channel uses the same channel.
1846 */
1847 openctx = sc->sc_isoctx;
1848 for (i = 0; i < sc->sc_isoctx; ++i) {
1849 if (sc->sc_ctx_ir[i] == NULL) {
1850 /*
1851 * Find a vacant contet. If this has the
1852 * smallest context number, register it.
1853 */
1854 if (openctx == sc->sc_isoctx) {
1855 openctx = i;
1856 }
1857 } else {
1858 /*
1859 * This context is used. Check whether this
1860 * context uses the same channel as ours.
1861 */
1862 if (sc->sc_ctx_ir[i]->irc_channel == channel) {
1863 /* Using same channel. */
1864 printf("%s: channel %d occupied by ctx%d\n",
1865 xname, channel, i);
1866 return NULL;
1867 }
1868 }
1869 }
1870
1871 /*
1872 * If there is a vacant context, allocate isochronous transmit
1873 * context for it.
1874 */
1875 if (openctx != sc->sc_isoctx) {
1876 printf("%s using ctx %d for iso receive\n", xname, openctx);
1877 if ((irc = fwohci_ir_ctx_construct(sc, openctx, channel,
1878 tagbm, bufnum, maxsize, flags)) == NULL) {
1879 return NULL;
1880 }
1881 #ifndef IR_CTX_OPENTEST
1882 sc->sc_ctx_ir[openctx] = irc;
1883 #else
1884 fwohci_ir_ctx_destruct(irc);
1885 irc = NULL;
1886 #endif
1887 } else {
1888 printf("%s: cannot find any vacant contexts\n", xname);
1889 irc = NULL;
1890 }
1891
1892 return (ieee1394_ir_tag_t)irc;
1893 }
1894
1895
1896 /*
1897 * int fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t *ir)
1898 *
1899 * This function will return 0 if it succeed. Otherwise return
1900 * negative value.
1901 */
1902 int
1903 fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t ir)
1904 {
1905 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)ir;
1906 struct fwohci_softc *sc = irc->irc_sc;
1907 int i;
1908
1909 if (sc->sc_ctx_ir[irc->irc_num] != irc) {
1910 printf("fwohci_ir_ctx_clear: irc differs %p %p\n",
1911 sc->sc_ctx_ir[irc->irc_num], irc);
1912 return -1;
1913 }
1914
1915 i = 0;
1916 while (irc->irc_status & IRC_STATUS_RUN) {
1917 tsleep((void *)irc, PWAIT|PCATCH, "IEEE1394 iso receive", 100);
1918 if (irc->irc_status & IRC_STATUS_RUN) {
1919 if (fwohci_ir_stop(irc) == 0) {
1920 irc->irc_status &= ~IRC_STATUS_RUN;
1921 }
1922
1923 }
1924 if (++i > 20) {
1925 u_int32_t reg
1926 = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1927 OHCI_SUBREG_ContextControlSet);
1928
1929 printf("fwochi_ir_ctx_clear: "
1930 "Cannot stop iso receive engine\n");
1931 printf("%s: intr IR_CommandPtr 0x%08x "
1932 "ContextCtrl 0x%08x%s%s%s%s\n",
1933 sc->sc_sc1394.sc1394_dev.dv_xname,
1934 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1935 OHCI_SUBREG_CommandPtr),
1936 reg,
1937 reg & OHCI_CTXCTL_RUN ? " run" : "",
1938 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
1939 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
1940 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
1941
1942 return EBUSY;
1943 }
1944 }
1945
1946 printf("fwohci_ir_ctx_clear: DMA engine is stopped. get %d frames max queuelen %d pos %d\n",
1947 irc->irc_pktcount, irc->irc_maxqueuelen, irc->irc_maxqueuepos);
1948
1949 fwohci_ir_ctx_destruct(irc);
1950
1951 sc->sc_ctx_ir[irc->irc_num] = NULL;
1952
1953 return 0;
1954 }
1955
1956
1957
1958
1959
1960
1961
1962
1963 ieee1394_it_tag_t
1964 fwohci_it_set(struct ieee1394_softc *isc, int channel, int tagbm)
1965 {
1966 ieee1394_it_tag_t rv;
1967 int tag;
1968
1969 for (tag = 0; tagbm != 0 && (tagbm & 0x01) == 0; tagbm >>= 1, ++tag);
1970
1971 rv = fwohci_it_ctx_set((struct fwohci_softc *)isc, channel, tag, 488);
1972
1973 return rv;
1974 }
1975
1976 /*
1977 * static ieee1394_it_tag_t
1978 * fwohci_it_ctx_set(struct fwohci_softc *sc,
1979 * u_int32_t key1 (channel), u_int32_t key2 (tag), int maxsize)
1980 *
1981 * This function will return non-negative value if it succeeds.
1982 * This return value is pointer to the context of isochronous
1983 * transmission. This function will return NULL value if it
1984 * fails.
1985 */
1986 static ieee1394_it_tag_t
1987 fwohci_it_ctx_set(struct fwohci_softc *sc, int channel, int tag, int maxsize)
1988 {
1989 int i, openctx;
1990 struct fwohci_it_ctx *itc;
1991 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
1992 #ifdef TEST_CHAIN
1993 extern int fwohci_test_chain(struct fwohci_it_ctx *);
1994 #endif /* TEST_CHAIN */
1995 #ifdef TEST_WRITE
1996 extern void fwohci_test_write(struct fwohci_it_ctx *itc);
1997 #endif /* TEST_WRITE */
1998
1999 printf("%s: it_ctx_set channel %d tag %d maxsize %d\n",
2000 xname, channel, tag, maxsize);
2001
2002 /*
2003 * This loop will find the smallest vacant context and check
2004 * whether other channel uses the same channel.
2005 */
2006 openctx = sc->sc_itctx;
2007 for (i = 0; i < sc->sc_itctx; ++i) {
2008 if (sc->sc_ctx_it[i] == NULL) {
2009 /*
2010 * Find a vacant contet. If this has the
2011 * smallest context number, register it.
2012 */
2013 if (openctx == sc->sc_itctx) {
2014 openctx = i;
2015 }
2016 } else {
2017 /*
2018 * This context is used. Check whether this
2019 * context uses the same channel as ours.
2020 */
2021 if (sc->sc_ctx_it[i]->itc_channel == channel) {
2022 /* Using same channel. */
2023 printf("%s: channel %d occupied by ctx%d\n",
2024 xname, channel, i);
2025 return NULL;
2026 }
2027 }
2028 }
2029
2030 /*
2031 * If there is a vacant context, allocate isochronous transmit
2032 * context for it.
2033 */
2034 if (openctx != sc->sc_itctx) {
2035 printf("%s using ctx %d for iso trasmit\n", xname, openctx);
2036 if ((itc = fwohci_it_ctx_construct(sc, openctx, channel,
2037 tag, maxsize)) == NULL) {
2038 return NULL;
2039 }
2040 sc->sc_ctx_it[openctx] = itc;
2041
2042 #ifdef TEST_CHAIN
2043 fwohci_test_chain(itc);
2044 #endif /* TEST_CHAIN */
2045 #ifdef TEST_WRITE
2046 fwohci_test_write(itc);
2047 itc = NULL;
2048 #endif /* TEST_WRITE */
2049
2050 } else {
2051 printf("%s: cannot find any vacant contexts\n", xname);
2052 itc = NULL;
2053 }
2054
2055 return (ieee1394_it_tag_t)itc;
2056 }
2057
2058
2059 /*
2060 * int fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2061 *
2062 * This function will return 0 if it succeed. Otherwise return
2063 * negative value.
2064 */
2065 int
2066 fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2067 {
2068 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
2069 struct fwohci_softc *sc = itc->itc_sc;
2070 int i;
2071
2072 if (sc->sc_ctx_it[itc->itc_num] != itc) {
2073 printf("fwohci_it_ctx_clear: itc differs %p %p\n",
2074 sc->sc_ctx_it[itc->itc_num], itc);
2075 return -1;
2076 }
2077
2078 fwohci_it_ctx_flush(it);
2079
2080 i = 0;
2081 while (itc->itc_flags & ITC_FLAGS_RUN) {
2082 tsleep((void *)itc, PWAIT|PCATCH, "IEEE1394 iso transmit", 100);
2083 if (itc->itc_flags & ITC_FLAGS_RUN) {
2084 u_int32_t reg;
2085
2086 reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2087 OHCI_SUBREG_ContextControlSet);
2088
2089 if ((reg & OHCI_CTXCTL_WAKE) == 0) {
2090 itc->itc_flags &= ~ITC_FLAGS_RUN;
2091 printf("fwochi_it_ctx_clear: "
2092 "DMA engine stopped without intr\n");
2093 }
2094 printf("%s: %d intr IT_CommandPtr 0x%08x "
2095 "ContextCtrl 0x%08x%s%s%s%s\n",
2096 sc->sc_sc1394.sc1394_dev.dv_xname, i,
2097 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2098 OHCI_SUBREG_CommandPtr),
2099 reg,
2100 reg & OHCI_CTXCTL_RUN ? " run" : "",
2101 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
2102 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
2103 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
2104
2105
2106 }
2107 if (++i > 20) {
2108 u_int32_t reg
2109 = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2110 OHCI_SUBREG_ContextControlSet);
2111
2112 printf("fwochi_it_ctx_clear: "
2113 "Cannot stop iso transmit engine\n");
2114 printf("%s: intr IT_CommandPtr 0x%08x "
2115 "ContextCtrl 0x%08x%s%s%s%s\n",
2116 sc->sc_sc1394.sc1394_dev.dv_xname,
2117 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2118 OHCI_SUBREG_CommandPtr),
2119 reg,
2120 reg & OHCI_CTXCTL_RUN ? " run" : "",
2121 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
2122 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
2123 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
2124
2125 return EBUSY;
2126 }
2127 }
2128
2129 printf("fwohci_it_ctx_clear: DMA engine is stopped.\n");
2130
2131 fwohci_it_ctx_destruct(itc);
2132
2133 sc->sc_ctx_it[itc->itc_num] = NULL;
2134
2135
2136 return 0;
2137 }
2138
2139
2140
2141
2142
2143
2144 /*
2145 * Asynchronous Receive Requests input frontend.
2146 */
2147 static void
2148 fwohci_arrq_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2149 {
2150 int rcode;
2151 u_int16_t len;
2152 u_int32_t key1, key2, off;
2153 u_int64_t addr, naddr;
2154 struct fwohci_handler *fh;
2155 struct fwohci_pkt pkt, res;
2156
2157 /*
2158 * Do not return if next packet is in the buffer, or the next
2159 * packet cannot be received until the next receive interrupt.
2160 */
2161 while (fwohci_buf_input(sc, fc, &pkt)) {
2162 if (pkt.fp_tcode == OHCI_TCODE_PHY) {
2163 fwohci_phy_input(sc, &pkt);
2164 continue;
2165 }
2166 key1 = pkt.fp_hdr[1] & 0xffff;
2167 key2 = pkt.fp_hdr[2];
2168 if ((pkt.fp_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) ||
2169 (pkt.fp_tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) {
2170 len = (pkt.fp_hdr[3] & 0xffff0000) >> 16;
2171 naddr = ((u_int64_t)key1 << 32) + key2;
2172 } else {
2173 len = 0;
2174 naddr = 0; /* XXX: gcc */
2175 }
2176 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2177 fh = LIST_NEXT(fh, fh_list)) {
2178 if (pkt.fp_tcode == fh->fh_tcode) {
2179 /* Assume length check happens in handler */
2180 if (key1 == fh->fh_key1 &&
2181 key2 == fh->fh_key2) {
2182 rcode = (*fh->fh_handler)(sc,
2183 fh->fh_handarg, &pkt);
2184 break;
2185 }
2186 addr = ((u_int64_t)fh->fh_key1 << 32) +
2187 fh->fh_key2;
2188 off = fh->fh_key3;
2189 /* Check for a range qualifier */
2190 if (len &&
2191 ((naddr >= addr) && (naddr < (addr + off))
2192 && (naddr + len <= (addr + off)))) {
2193 rcode = (*fh->fh_handler)(sc,
2194 fh->fh_handarg, &pkt);
2195 break;
2196 }
2197 }
2198 }
2199 if (fh == NULL) {
2200 rcode = IEEE1394_RCODE_ADDRESS_ERROR;
2201 DPRINTFN(1, ("fwohci_arrq_input: no listener: tcode "
2202 "0x%x, addr=0x%04x %08x\n", pkt.fp_tcode, key1,
2203 key2));
2204 DPRINTFN(2, ("fwohci_arrq_input: no listener: hdr[0]: "
2205 "0x%08x, hdr[1]: 0x%08x, hdr[2]: 0x%08x, hdr[3]: "
2206 "0x%08x\n", pkt.fp_hdr[0], pkt.fp_hdr[1],
2207 pkt.fp_hdr[2], pkt.fp_hdr[3]));
2208 }
2209 if (((*pkt.fp_trail & 0x001f0000) >> 16) !=
2210 OHCI_CTXCTL_EVENT_ACK_PENDING)
2211 continue;
2212 if (rcode != -1) {
2213 memset(&res, 0, sizeof(res));
2214 res.fp_uio.uio_rw = UIO_WRITE;
2215 res.fp_uio.uio_segflg = UIO_SYSSPACE;
2216 fwohci_atrs_output(sc, rcode, &pkt, &res);
2217 }
2218 }
2219 fwohci_buf_next(sc, fc);
2220 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2221 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2222 }
2223
2224
2225 /*
2226 * Asynchronous Receive Response input frontend.
2227 */
2228 static void
2229 fwohci_arrs_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2230 {
2231 struct fwohci_pkt pkt;
2232 struct fwohci_handler *fh;
2233 u_int16_t srcid;
2234 int rcode, tlabel;
2235
2236 while (fwohci_buf_input(sc, fc, &pkt)) {
2237 srcid = pkt.fp_hdr[1] >> 16;
2238 rcode = (pkt.fp_hdr[1] & 0x0000f000) >> 12;
2239 tlabel = (pkt.fp_hdr[0] & 0x0000fc00) >> 10;
2240 DPRINTFN(1, ("fwohci_arrs_input: tcode 0x%x, from 0x%04x,"
2241 " tlabel 0x%x, rcode 0x%x, hlen %d, dlen %d\n",
2242 pkt.fp_tcode, srcid, tlabel, rcode, pkt.fp_hlen,
2243 pkt.fp_dlen));
2244 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2245 fh = LIST_NEXT(fh, fh_list)) {
2246 if (pkt.fp_tcode == fh->fh_tcode &&
2247 (srcid & OHCI_NodeId_NodeNumber) == fh->fh_key1 &&
2248 tlabel == fh->fh_key2) {
2249 (*fh->fh_handler)(sc, fh->fh_handarg, &pkt);
2250 LIST_REMOVE(fh, fh_list);
2251 free(fh, M_DEVBUF);
2252 break;
2253 }
2254 }
2255 if (fh == NULL)
2256 DPRINTFN(1, ("fwohci_arrs_input: no listner\n"));
2257 }
2258 fwohci_buf_next(sc, fc);
2259 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2260 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2261 }
2262
2263 /*
2264 * Isochronous Receive input frontend.
2265 */
2266 static void
2267 fwohci_as_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2268 {
2269 int rcode, chan, tag;
2270 struct iovec *iov;
2271 struct fwohci_handler *fh;
2272 struct fwohci_pkt pkt;
2273
2274 #if DOUBLEBUF
2275 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
2276 struct fwohci_buf *fb;
2277 int i;
2278 u_int32_t reg;
2279
2280 /* stop DMA engine before read buffer */
2281 reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx,
2282 OHCI_SUBREG_ContextControlClear);
2283 DPRINTFN(5, ("ir_input %08x =>", reg));
2284 if (reg & OHCI_CTXCTL_RUN) {
2285 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2286 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2287 }
2288 DPRINTFN(5, (" %08x\n", OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlClear)));
2289
2290 i = 0;
2291 while ((reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlSet)) & OHCI_CTXCTL_ACTIVE) {
2292 delay(10);
2293 if (++i > 10000) {
2294 printf("cannot stop DMA engine 0x%08x\n", reg);
2295 return;
2296 }
2297 }
2298
2299 /* rotate DMA buffer */
2300 fb = TAILQ_FIRST(&fc->fc_buf2);
2301 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx, OHCI_SUBREG_CommandPtr,
2302 fb->fb_daddr | 1);
2303 /* start DMA engine */
2304 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2305 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
2306 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear,
2307 (1 << fc->fc_ctx));
2308 }
2309 #endif
2310
2311 while (fwohci_buf_input_ppb(sc, fc, &pkt)) {
2312 chan = (pkt.fp_hdr[0] & 0x00003f00) >> 8;
2313 tag = (pkt.fp_hdr[0] & 0x0000c000) >> 14;
2314 DPRINTFN(1, ("fwohci_as_input: hdr 0x%08x, tcode 0x%0x, hlen %d"
2315 ", dlen %d\n", pkt.fp_hdr[0], pkt.fp_tcode, pkt.fp_hlen,
2316 pkt.fp_dlen));
2317 if (tag == IEEE1394_TAG_GASP &&
2318 fc->fc_type == FWOHCI_CTX_ISO_SINGLE) {
2319 /*
2320 * The pkt with tag=3 is GASP format.
2321 * Move GASP header to header part.
2322 */
2323 if (pkt.fp_dlen < 8)
2324 continue;
2325 iov = pkt.fp_iov;
2326 /* assuming pkt per buffer mode */
2327 pkt.fp_hdr[1] = ntohl(((u_int32_t *)iov->iov_base)[0]);
2328 pkt.fp_hdr[2] = ntohl(((u_int32_t *)iov->iov_base)[1]);
2329 iov->iov_base = (caddr_t)iov->iov_base + 8;
2330 iov->iov_len -= 8;
2331 pkt.fp_hlen += 8;
2332 pkt.fp_dlen -= 8;
2333 }
2334 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2335 fh = LIST_NEXT(fh, fh_list)) {
2336 if (pkt.fp_tcode == fh->fh_tcode &&
2337 (chan == fh->fh_key1 ||
2338 fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) &&
2339 ((1 << tag) & fh->fh_key2) != 0) {
2340 rcode = (*fh->fh_handler)(sc, fh->fh_handarg,
2341 &pkt);
2342 break;
2343 }
2344 }
2345 #ifdef FW_DEBUG
2346 if (fh == NULL) {
2347 DPRINTFN(1, ("fwohci_as_input: no handler\n"));
2348 } else {
2349 DPRINTFN(1, ("fwohci_as_input: rcode %d\n", rcode));
2350 }
2351 #endif
2352 }
2353 fwohci_buf_next(sc, fc);
2354
2355 if (fc->fc_type == FWOHCI_CTX_ISO_SINGLE) {
2356 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2357 OHCI_SUBREG_ContextControlSet,
2358 OHCI_CTXCTL_WAKE);
2359 }
2360 }
2361
2362 /*
2363 * Asynchronous Transmit common routine.
2364 */
2365 static int
2366 fwohci_at_output(struct fwohci_softc *sc, struct fwohci_ctx *fc,
2367 struct fwohci_pkt *pkt)
2368 {
2369 struct fwohci_buf *fb;
2370 struct fwohci_desc *fd;
2371 struct mbuf *m, *m0;
2372 int i, ndesc, error, off, len;
2373 u_int32_t val;
2374 #ifdef FW_DEBUG
2375 struct iovec *iov;
2376 int tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10;
2377 #endif
2378
2379 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == IEEE1394_BCAST_PHY_ID)
2380 /* We can't send anything during selfid duration */
2381 return EAGAIN;
2382
2383 #ifdef FW_DEBUG
2384 DPRINTFN(1, ("fwohci_at_output: tcode 0x%x, tlabel 0x%x hlen %d, "
2385 "dlen %d", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen));
2386 for (i = 0; i < pkt->fp_hlen/4; i++)
2387 DPRINTFN(2, ("%s%08x", i?" ":"\n ", pkt->fp_hdr[i]));
2388 DPRINTFN(2, ("$"));
2389 for (ndesc = 0, iov = pkt->fp_iov;
2390 ndesc < pkt->fp_uio.uio_iovcnt; ndesc++, iov++) {
2391 for (i = 0; i < iov->iov_len; i++)
2392 DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ",
2393 ((u_int8_t *)iov->iov_base)[i]));
2394 DPRINTFN(2, ("$"));
2395 }
2396 DPRINTFN(1, ("\n"));
2397 #endif
2398
2399 if ((m = pkt->fp_m) != NULL) {
2400 for (ndesc = 2; m != NULL; m = m->m_next)
2401 ndesc++;
2402 if (ndesc > OHCI_DESC_MAX) {
2403 m0 = NULL;
2404 ndesc = 2;
2405 for (off = 0; off < pkt->fp_dlen; off += len) {
2406 if (m0 == NULL) {
2407 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2408 if (m0 != NULL)
2409 M_COPY_PKTHDR(m0, pkt->fp_m);
2410 m = m0;
2411 } else {
2412 MGET(m->m_next, M_DONTWAIT, MT_DATA);
2413 m = m->m_next;
2414 }
2415 if (m != NULL)
2416 MCLGET(m, M_DONTWAIT);
2417 if (m == NULL || (m->m_flags & M_EXT) == 0) {
2418 m_freem(m0);
2419 return ENOMEM;
2420 }
2421 len = pkt->fp_dlen - off;
2422 if (len > m->m_ext.ext_size)
2423 len = m->m_ext.ext_size;
2424 m_copydata(pkt->fp_m, off, len,
2425 mtod(m, caddr_t));
2426 m->m_len = len;
2427 ndesc++;
2428 }
2429 m_freem(pkt->fp_m);
2430 pkt->fp_m = m0;
2431 }
2432 } else
2433 ndesc = 2 + pkt->fp_uio.uio_iovcnt;
2434
2435 if (ndesc > OHCI_DESC_MAX)
2436 return ENOBUFS;
2437
2438 fb = malloc(sizeof(*fb), M_DEVBUF, M_WAITOK);
2439 if (ndesc > 2) {
2440 if ((error = bus_dmamap_create(sc->sc_dmat, pkt->fp_dlen,
2441 OHCI_DESC_MAX - 2, pkt->fp_dlen, 0, BUS_DMA_WAITOK,
2442 &fb->fb_dmamap)) != 0) {
2443 fwohci_desc_put(sc, fb->fb_desc, ndesc);
2444 free(fb, M_DEVBUF);
2445 return error;
2446 }
2447
2448 if (pkt->fp_m != NULL)
2449 error = bus_dmamap_load_mbuf(sc->sc_dmat, fb->fb_dmamap,
2450 pkt->fp_m, BUS_DMA_WAITOK);
2451 else
2452 error = bus_dmamap_load_uio(sc->sc_dmat, fb->fb_dmamap,
2453 &pkt->fp_uio, BUS_DMA_WAITOK);
2454 if (error != 0) {
2455 DPRINTFN(1, ("Can't load DMA map: %d\n", error));
2456 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
2457 fwohci_desc_put(sc, fb->fb_desc, ndesc);
2458 free(fb, M_DEVBUF);
2459 return error;
2460 }
2461 ndesc = fb->fb_dmamap->dm_nsegs + 2;
2462
2463 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0, pkt->fp_dlen,
2464 BUS_DMASYNC_PREWRITE);
2465 }
2466
2467 fb->fb_nseg = ndesc;
2468 fb->fb_desc = fwohci_desc_get(sc, ndesc);
2469 if (fb->fb_desc == NULL) {
2470 free(fb, M_DEVBUF);
2471 return ENOBUFS;
2472 }
2473 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
2474 ((caddr_t)fb->fb_desc - (caddr_t)sc->sc_desc);
2475 fb->fb_m = pkt->fp_m;
2476 fb->fb_callback = pkt->fp_callback;
2477 fb->fb_statuscb = pkt->fp_statuscb;
2478 fb->fb_statusarg = pkt->fp_statusarg;
2479
2480 fd = fb->fb_desc;
2481 fd->fd_flags = OHCI_DESC_IMMED;
2482 fd->fd_reqcount = pkt->fp_hlen;
2483 fd->fd_data = 0;
2484 fd->fd_branch = 0;
2485 fd->fd_status = 0;
2486 if (fc->fc_ctx == OHCI_CTX_ASYNC_TX_RESPONSE) {
2487 i = 3; /* XXX: 3 sec */
2488 val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
2489 fd->fd_timestamp = ((val >> 12) & 0x1fff) |
2490 ((((val >> 25) + i) & 0x7) << 13);
2491 } else
2492 fd->fd_timestamp = 0;
2493 memcpy(fd + 1, pkt->fp_hdr, pkt->fp_hlen);
2494 for (i = 0; i < ndesc - 2; i++) {
2495 fd = fb->fb_desc + 2 + i;
2496 fd->fd_flags = 0;
2497 fd->fd_reqcount = fb->fb_dmamap->dm_segs[i].ds_len;
2498 fd->fd_data = fb->fb_dmamap->dm_segs[i].ds_addr;
2499 fd->fd_branch = 0;
2500 fd->fd_status = 0;
2501 fd->fd_timestamp = 0;
2502 }
2503 fd->fd_flags |= OHCI_DESC_LAST | OHCI_DESC_BRANCH;
2504 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
2505
2506 #ifdef FW_DEBUG
2507 DPRINTFN(1, ("fwohci_at_output: desc %ld",
2508 (long)(fb->fb_desc - sc->sc_desc)));
2509 for (i = 0; i < ndesc * 4; i++)
2510 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ",
2511 ((u_int32_t *)fb->fb_desc)[i]));
2512 DPRINTFN(1, ("\n"));
2513 #endif
2514
2515 val = OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx,
2516 OHCI_SUBREG_ContextControlClear);
2517
2518 if (val & OHCI_CTXCTL_RUN) {
2519 if (fc->fc_branch == NULL) {
2520 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2521 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2522 goto run;
2523 }
2524 *fc->fc_branch = fb->fb_daddr | ndesc;
2525 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2526 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2527 } else {
2528 run:
2529 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2530 OHCI_SUBREG_CommandPtr, fb->fb_daddr | ndesc);
2531 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2532 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
2533 }
2534 fc->fc_branch = &fd->fd_branch;
2535
2536 fc->fc_bufcnt++;
2537 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
2538 pkt->fp_m = NULL;
2539 return 0;
2540 }
2541
2542 static void
2543 fwohci_at_done(struct fwohci_softc *sc, struct fwohci_ctx *fc, int force)
2544 {
2545 struct fwohci_buf *fb;
2546 struct fwohci_desc *fd;
2547 struct fwohci_pkt pkt;
2548 int i;
2549
2550 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
2551 fd = fb->fb_desc;
2552 #ifdef FW_DEBUG
2553 DPRINTFN(1, ("fwohci_at_done: %sdesc %ld (%d)",
2554 force ? "force " : "", (long)(fd - sc->sc_desc),
2555 fb->fb_nseg));
2556 for (i = 0; i < fb->fb_nseg * 4; i++)
2557 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ",
2558 ((u_int32_t *)fd)[i]));
2559 DPRINTFN(1, ("\n"));
2560 #endif
2561 if (fb->fb_nseg > 2)
2562 fd += fb->fb_nseg - 1;
2563 if (!force && !(fd->fd_status & OHCI_CTXCTL_ACTIVE))
2564 break;
2565 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
2566 if (fc->fc_branch == &fd->fd_branch) {
2567 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2568 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2569 fc->fc_branch = NULL;
2570 for (i = 0; i < OHCI_LOOP; i++) {
2571 if (!(OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx,
2572 OHCI_SUBREG_ContextControlClear) &
2573 OHCI_CTXCTL_ACTIVE))
2574 break;
2575 DELAY(10);
2576 }
2577 }
2578
2579 if (fb->fb_statuscb) {
2580 memset(&pkt, 0, sizeof(pkt));
2581 pkt.fp_status = fd->fd_status;
2582 memcpy(pkt.fp_hdr, fd + 1, sizeof(pkt.fp_hdr[0]));
2583
2584 /* Indicate this is just returning the status bits. */
2585 pkt.fp_tcode = -1;
2586 (*fb->fb_statuscb)(sc, fb->fb_statusarg, &pkt);
2587 fb->fb_statuscb = NULL;
2588 fb->fb_statusarg = NULL;
2589 }
2590 fwohci_desc_put(sc, fb->fb_desc, fb->fb_nseg);
2591 if (fb->fb_nseg > 2)
2592 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
2593 fc->fc_bufcnt--;
2594 if (fb->fb_callback) {
2595 (*fb->fb_callback)(sc->sc_sc1394.sc1394_if, fb->fb_m);
2596 fb->fb_callback = NULL;
2597 } else if (fb->fb_m != NULL)
2598 m_freem(fb->fb_m);
2599 free(fb, M_DEVBUF);
2600 }
2601 }
2602
2603 /*
2604 * Asynchronous Transmit Response -- in response of request packet.
2605 */
2606 static void
2607 fwohci_atrs_output(struct fwohci_softc *sc, int rcode, struct fwohci_pkt *req,
2608 struct fwohci_pkt *res)
2609 {
2610
2611 if (((*req->fp_trail & 0x001f0000) >> 16) !=
2612 OHCI_CTXCTL_EVENT_ACK_PENDING)
2613 return;
2614
2615 res->fp_hdr[0] = (req->fp_hdr[0] & 0x0000fc00) | 0x00000100;
2616 res->fp_hdr[1] = (req->fp_hdr[1] & 0xffff0000) | (rcode << 12);
2617 switch (req->fp_tcode) {
2618 case IEEE1394_TCODE_WRITE_REQ_QUAD:
2619 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
2620 res->fp_tcode = IEEE1394_TCODE_WRITE_RESP;
2621 res->fp_hlen = 12;
2622 break;
2623 case IEEE1394_TCODE_READ_REQ_QUAD:
2624 res->fp_tcode = IEEE1394_TCODE_READ_RESP_QUAD;
2625 res->fp_hlen = 16;
2626 res->fp_dlen = 0;
2627 if (res->fp_uio.uio_iovcnt == 1 && res->fp_iov[0].iov_len == 4)
2628 res->fp_hdr[3] =
2629 *(u_int32_t *)res->fp_iov[0].iov_base;
2630 res->fp_uio.uio_iovcnt = 0;
2631 break;
2632 case IEEE1394_TCODE_READ_REQ_BLOCK:
2633 case IEEE1394_TCODE_LOCK_REQ:
2634 if (req->fp_tcode == IEEE1394_TCODE_LOCK_REQ)
2635 res->fp_tcode = IEEE1394_TCODE_LOCK_RESP;
2636 else
2637 res->fp_tcode = IEEE1394_TCODE_READ_RESP_BLOCK;
2638 res->fp_hlen = 16;
2639 res->fp_dlen = res->fp_uio.uio_resid;
2640 res->fp_hdr[3] = res->fp_dlen << 16;
2641 break;
2642 }
2643 res->fp_hdr[0] |= (res->fp_tcode << 4);
2644 fwohci_at_output(sc, sc->sc_ctx_atrs, res);
2645 }
2646
2647 /*
2648 * APPLICATION LAYER SERVICES
2649 */
2650
2651 /*
2652 * Retrieve Global UID from GUID ROM
2653 */
2654 static int
2655 fwohci_guidrom_init(struct fwohci_softc *sc)
2656 {
2657 int i, n, off;
2658 u_int32_t val1, val2;
2659
2660 /* Extract the Global UID
2661 */
2662 val1 = OHCI_CSR_READ(sc, OHCI_REG_GUIDHi);
2663 val2 = OHCI_CSR_READ(sc, OHCI_REG_GUIDLo);
2664
2665 if (val1 != 0 || val2 != 0) {
2666 sc->sc_sc1394.sc1394_guid[0] = (val1 >> 24) & 0xff;
2667 sc->sc_sc1394.sc1394_guid[1] = (val1 >> 16) & 0xff;
2668 sc->sc_sc1394.sc1394_guid[2] = (val1 >> 8) & 0xff;
2669 sc->sc_sc1394.sc1394_guid[3] = (val1 >> 0) & 0xff;
2670 sc->sc_sc1394.sc1394_guid[4] = (val2 >> 24) & 0xff;
2671 sc->sc_sc1394.sc1394_guid[5] = (val2 >> 16) & 0xff;
2672 sc->sc_sc1394.sc1394_guid[6] = (val2 >> 8) & 0xff;
2673 sc->sc_sc1394.sc1394_guid[7] = (val2 >> 0) & 0xff;
2674 } else {
2675 val1 = OHCI_CSR_READ(sc, OHCI_REG_Version);
2676 if ((val1 & OHCI_Version_GUID_ROM) == 0)
2677 return -1;
2678 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom, OHCI_Guid_AddrReset);
2679 for (i = 0; i < OHCI_LOOP; i++) {
2680 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom);
2681 if (!(val1 & OHCI_Guid_AddrReset))
2682 break;
2683 DELAY(10);
2684 }
2685 off = OHCI_BITVAL(val1, OHCI_Guid_MiniROM) + 4;
2686 val2 = 0;
2687 for (n = 0; n < off + sizeof(sc->sc_sc1394.sc1394_guid); n++) {
2688 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom,
2689 OHCI_Guid_RdStart);
2690 for (i = 0; i < OHCI_LOOP; i++) {
2691 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom);
2692 if (!(val1 & OHCI_Guid_RdStart))
2693 break;
2694 DELAY(10);
2695 }
2696 if (n < off)
2697 continue;
2698 val1 = OHCI_BITVAL(val1, OHCI_Guid_RdData);
2699 sc->sc_sc1394.sc1394_guid[n - off] = val1;
2700 val2 |= val1;
2701 }
2702 if (val2 == 0)
2703 return -1;
2704 }
2705 return 0;
2706 }
2707
2708 /*
2709 * Initialization for Configuration ROM (no DMA context)
2710 */
2711
2712 #define CFR_MAXUNIT 20
2713
2714 struct configromctx {
2715 u_int32_t *ptr;
2716 int curunit;
2717 struct {
2718 u_int32_t *start;
2719 int length;
2720 u_int32_t *refer;
2721 int refunit;
2722 } unit[CFR_MAXUNIT];
2723 };
2724
2725 #define CFR_PUT_DATA4(cfr, d1, d2, d3, d4) \
2726 (*(cfr)->ptr++ = (((d1)<<24) | ((d2)<<16) | ((d3)<<8) | (d4)))
2727
2728 #define CFR_PUT_DATA1(cfr, d) (*(cfr)->ptr++ = (d))
2729
2730 #define CFR_PUT_VALUE(cfr, key, d) (*(cfr)->ptr++ = ((key)<<24) | (d))
2731
2732 #define CFR_PUT_CRC(cfr, n) \
2733 (*(cfr)->unit[n].start = ((cfr)->unit[n].length << 16) | \
2734 fwohci_crc16((cfr)->unit[n].start + 1, (cfr)->unit[n].length))
2735
2736 #define CFR_START_UNIT(cfr, n) \
2737 do { \
2738 if ((cfr)->unit[n].refer != NULL) { \
2739 *(cfr)->unit[n].refer |= \
2740 (cfr)->ptr - (cfr)->unit[n].refer; \
2741 CFR_PUT_CRC(cfr, (cfr)->unit[n].refunit); \
2742 } \
2743 (cfr)->curunit = (n); \
2744 (cfr)->unit[n].start = (cfr)->ptr++; \
2745 } while (0 /* CONSTCOND */)
2746
2747 #define CFR_PUT_REFER(cfr, key, n) \
2748 do { \
2749 (cfr)->unit[n].refer = (cfr)->ptr; \
2750 (cfr)->unit[n].refunit = (cfr)->curunit; \
2751 *(cfr)->ptr++ = (key) << 24; \
2752 } while (0 /* CONSTCOND */)
2753
2754 #define CFR_END_UNIT(cfr) \
2755 do { \
2756 (cfr)->unit[(cfr)->curunit].length = (cfr)->ptr - \
2757 ((cfr)->unit[(cfr)->curunit].start + 1); \
2758 CFR_PUT_CRC(cfr, (cfr)->curunit); \
2759 } while (0 /* CONSTCOND */)
2760
2761 static u_int16_t
2762 fwohci_crc16(u_int32_t *ptr, int len)
2763 {
2764 int shift;
2765 u_int32_t crc, sum, data;
2766
2767 crc = 0;
2768 while (len-- > 0) {
2769 data = *ptr++;
2770 for (shift = 28; shift >= 0; shift -= 4) {
2771 sum = ((crc >> 12) ^ (data >> shift)) & 0x000f;
2772 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
2773 }
2774 crc &= 0xffff;
2775 }
2776 return crc;
2777 }
2778
2779 static void
2780 fwohci_configrom_init(struct fwohci_softc *sc)
2781 {
2782 int i, val;
2783 struct fwohci_buf *fb;
2784 u_int32_t *hdr;
2785 struct configromctx cfr;
2786
2787 fb = &sc->sc_buf_cnfrom;
2788 memset(&cfr, 0, sizeof(cfr));
2789 cfr.ptr = hdr = (u_int32_t *)fb->fb_buf;
2790
2791 /* headers */
2792 CFR_START_UNIT(&cfr, 0);
2793 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusId));
2794 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusOptions));
2795 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDHi));
2796 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDLo));
2797 CFR_END_UNIT(&cfr);
2798 /* copy info_length from crc_length */
2799 *hdr |= (*hdr & 0x00ff0000) << 8;
2800 OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMhdr, *hdr);
2801
2802 /* root directory */
2803 CFR_START_UNIT(&cfr, 1);
2804 CFR_PUT_VALUE(&cfr, 0x03, 0x00005e); /* vendor id */
2805 CFR_PUT_REFER(&cfr, 0x81, 2); /* textual descriptor offset */
2806 CFR_PUT_VALUE(&cfr, 0x0c, 0x0083c0); /* node capability */
2807 /* spt,64,fix,lst,drq */
2808 #ifdef INET
2809 CFR_PUT_REFER(&cfr, 0xd1, 3); /* IPv4 unit directory */
2810 #endif /* INET */
2811 #ifdef INET6
2812 CFR_PUT_REFER(&cfr, 0xd1, 4); /* IPv6 unit directory */
2813 #endif /* INET6 */
2814 CFR_END_UNIT(&cfr);
2815
2816 CFR_START_UNIT(&cfr, 2);
2817 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2818 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2819 CFR_PUT_DATA4(&cfr, 'N', 'e', 't', 'B');
2820 CFR_PUT_DATA4(&cfr, 'S', 'D', 0x00, 0x00);
2821 CFR_END_UNIT(&cfr);
2822
2823 #ifdef INET
2824 /* IPv4 unit directory */
2825 CFR_START_UNIT(&cfr, 3);
2826 CFR_PUT_VALUE(&cfr, 0x12, 0x00005e); /* unit spec id */
2827 CFR_PUT_REFER(&cfr, 0x81, 6); /* textual descriptor offset */
2828 CFR_PUT_VALUE(&cfr, 0x13, 0x000001); /* unit sw version */
2829 CFR_PUT_REFER(&cfr, 0x81, 7); /* textual descriptor offset */
2830 CFR_PUT_REFER(&cfr, 0x95, 8); /* Unit location */
2831 CFR_END_UNIT(&cfr);
2832
2833 CFR_START_UNIT(&cfr, 6);
2834 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2835 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2836 CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A');
2837 CFR_END_UNIT(&cfr);
2838
2839 CFR_START_UNIT(&cfr, 7);
2840 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2841 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2842 CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '4');
2843 CFR_END_UNIT(&cfr);
2844
2845 CFR_START_UNIT(&cfr, 8); /* Spec's valid addr range. */
2846 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2847 CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1));
2848 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2849 CFR_PUT_DATA1(&cfr, FW_FIFO_LO);
2850 CFR_END_UNIT(&cfr);
2851
2852 #endif /* INET */
2853
2854 #ifdef INET6
2855 /* IPv6 unit directory */
2856 CFR_START_UNIT(&cfr, 4);
2857 CFR_PUT_VALUE(&cfr, 0x12, 0x00005e); /* unit spec id */
2858 CFR_PUT_REFER(&cfr, 0x81, 9); /* textual descriptor offset */
2859 CFR_PUT_VALUE(&cfr, 0x13, 0x000002); /* unit sw version */
2860 /* XXX: TBA by IANA */
2861 CFR_PUT_REFER(&cfr, 0x81, 10); /* textual descriptor offset */
2862 CFR_PUT_REFER(&cfr, 0x95, 11); /* Unit location */
2863 CFR_END_UNIT(&cfr);
2864
2865 CFR_START_UNIT(&cfr, 9);
2866 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2867 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */
2868 CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A');
2869 CFR_END_UNIT(&cfr);
2870
2871 CFR_START_UNIT(&cfr, 10);
2872 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */
2873 CFR_PUT_DATA1(&cfr, 0);
2874 CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '6');
2875 CFR_END_UNIT(&cfr);
2876
2877 CFR_START_UNIT(&cfr, 11); /* Spec's valid addr range. */
2878 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2879 CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1));
2880 CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
2881 CFR_PUT_DATA1(&cfr, FW_FIFO_LO);
2882 CFR_END_UNIT(&cfr);
2883
2884 #endif /* INET6 */
2885
2886 fb->fb_off = cfr.ptr - hdr;
2887 #ifdef FW_DEBUG
2888 DPRINTF(("%s: Config ROM:", sc->sc_sc1394.sc1394_dev.dv_xname));
2889 for (i = 0; i < fb->fb_off; i++)
2890 DPRINTF(("%s%08x", i&7?" ":"\n ", hdr[i]));
2891 DPRINTF(("\n"));
2892 #endif /* FW_DEBUG */
2893
2894 /*
2895 * Make network byte order for DMA
2896 */
2897 for (i = 0; i < fb->fb_off; i++)
2898 HTONL(hdr[i]);
2899 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0,
2900 (caddr_t)cfr.ptr - fb->fb_buf, BUS_DMASYNC_PREWRITE);
2901
2902 OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMmap,
2903 fb->fb_dmamap->dm_segs[0].ds_addr);
2904
2905 /* This register is only valid on OHCI 1.1. */
2906 val = OHCI_CSR_READ(sc, OHCI_REG_Version);
2907 if ((OHCI_Version_GET_Version(val) == 1) &&
2908 (OHCI_Version_GET_Revision(val) == 1))
2909 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet,
2910 OHCI_HCControl_BIBImageValid);
2911
2912 /* Only allow quad reads of the rom. */
2913 for (i = 0; i < fb->fb_off; i++)
2914 fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD,
2915 CSR_BASE_HI, CSR_BASE_LO + CSR_CONFIG_ROM + (i * 4), 0,
2916 fwohci_configrom_input, NULL);
2917 }
2918
2919 static int
2920 fwohci_configrom_input(struct fwohci_softc *sc, void *arg,
2921 struct fwohci_pkt *pkt)
2922 {
2923 struct fwohci_pkt res;
2924 u_int32_t loc, *rom;
2925
2926 /* This will be used as an array index so size accordingly. */
2927 loc = pkt->fp_hdr[2] - (CSR_BASE_LO + CSR_CONFIG_ROM);
2928 if ((loc & 0x03) != 0) {
2929 /* alignment error */
2930 return IEEE1394_RCODE_ADDRESS_ERROR;
2931 }
2932 else
2933 loc /= 4;
2934 rom = (u_int32_t *)sc->sc_buf_cnfrom.fb_buf;
2935
2936 DPRINTFN(1, ("fwohci_configrom_input: ConfigRom[0x%04x]: 0x%08x\n", loc,
2937 ntohl(rom[loc])));
2938
2939 memset(&res, 0, sizeof(res));
2940 res.fp_hdr[3] = rom[loc];
2941 fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res);
2942 return -1;
2943 }
2944
2945 /*
2946 * SelfID buffer (no DMA context)
2947 */
2948 static void
2949 fwohci_selfid_init(struct fwohci_softc *sc)
2950 {
2951 struct fwohci_buf *fb;
2952
2953 fb = &sc->sc_buf_selfid;
2954 #ifdef DIAGNOSTIC
2955 if ((fb->fb_dmamap->dm_segs[0].ds_addr & 0x7ff) != 0)
2956 panic("fwohci_selfid_init: not aligned: %ld (%ld) %p",
2957 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_addr,
2958 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_len, fb->fb_buf);
2959 #endif
2960 memset(fb->fb_buf, 0, fb->fb_dmamap->dm_segs[0].ds_len);
2961 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0,
2962 fb->fb_dmamap->dm_segs[0].ds_len, BUS_DMASYNC_PREREAD);
2963
2964 OHCI_CSR_WRITE(sc, OHCI_REG_SelfIDBuffer,
2965 fb->fb_dmamap->dm_segs[0].ds_addr);
2966 }
2967
2968 static int
2969 fwohci_selfid_input(struct fwohci_softc *sc)
2970 {
2971 int i;
2972 u_int32_t count, val, gen;
2973 u_int32_t *buf;
2974
2975 buf = (u_int32_t *)sc->sc_buf_selfid.fb_buf;
2976 val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount);
2977 again:
2978 if (val & OHCI_SelfID_Error) {
2979 printf("%s: SelfID Error\n", sc->sc_sc1394.sc1394_dev.dv_xname);
2980 return -1;
2981 }
2982 count = OHCI_BITVAL(val, OHCI_SelfID_Size);
2983
2984 bus_dmamap_sync(sc->sc_dmat, sc->sc_buf_selfid.fb_dmamap,
2985 0, count << 2, BUS_DMASYNC_POSTREAD);
2986 gen = OHCI_BITVAL(buf[0], OHCI_SelfID_Gen);
2987
2988 #ifdef FW_DEBUG
2989 DPRINTFN(1, ("%s: SelfID: 0x%08x", sc->sc_sc1394.sc1394_dev.dv_xname,
2990 val));
2991 for (i = 0; i < count; i++)
2992 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ", buf[i]));
2993 DPRINTFN(1, ("\n"));
2994 #endif /* FW_DEBUG */
2995
2996 for (i = 1; i < count; i += 2) {
2997 if (buf[i] != ~buf[i + 1])
2998 break;
2999 if (buf[i] & 0x00000001)
3000 continue; /* more pkt */
3001 if (buf[i] & 0x00800000)
3002 continue; /* external id */
3003 sc->sc_rootid = (buf[i] & 0x3f000000) >> 24;
3004 if ((buf[i] & 0x00400800) == 0x00400800)
3005 sc->sc_irmid = sc->sc_rootid;
3006 }
3007
3008 val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount);
3009 if (OHCI_BITVAL(val, OHCI_SelfID_Gen) != gen) {
3010 if (OHCI_BITVAL(val, OHCI_SelfID_Gen) !=
3011 OHCI_BITVAL(buf[0], OHCI_SelfID_Gen))
3012 goto again;
3013 DPRINTF(("%s: SelfID Gen mismatch (%d, %d)\n",
3014 sc->sc_sc1394.sc1394_dev.dv_xname, gen,
3015 OHCI_BITVAL(val, OHCI_SelfID_Gen)));
3016 return -1;
3017 }
3018 if (i != count) {
3019 printf("%s: SelfID corrupted (%d, 0x%08x, 0x%08x)\n",
3020 sc->sc_sc1394.sc1394_dev.dv_xname, i, buf[i], buf[i + 1]);
3021 #if 1
3022 if (i == 1 && buf[i] == 0 && buf[i + 1] == 0) {
3023 /*
3024 * XXX: CXD3222 sometimes fails to DMA
3025 * selfid packet??
3026 */
3027 sc->sc_rootid = (count - 1) / 2 - 1;
3028 sc->sc_irmid = sc->sc_rootid;
3029 } else
3030 #endif
3031 return -1;
3032 }
3033
3034 val = OHCI_CSR_READ(sc, OHCI_REG_NodeId);
3035 if ((val & OHCI_NodeId_IDValid) == 0) {
3036 sc->sc_nodeid = 0xffff; /* invalid */
3037 printf("%s: nodeid is invalid\n",
3038 sc->sc_sc1394.sc1394_dev.dv_xname);
3039 return -1;
3040 }
3041 sc->sc_nodeid = val & 0xffff;
3042 sc->sc_sc1394.sc1394_node_id = sc->sc_nodeid & OHCI_NodeId_NodeNumber;
3043
3044 DPRINTF(("%s: nodeid=0x%04x(%d), rootid=%d, irmid=%d\n",
3045 sc->sc_sc1394.sc1394_dev.dv_xname, sc->sc_nodeid,
3046 sc->sc_nodeid & OHCI_NodeId_NodeNumber, sc->sc_rootid,
3047 sc->sc_irmid));
3048
3049 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) > sc->sc_rootid)
3050 return -1;
3051
3052 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == sc->sc_rootid)
3053 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet,
3054 OHCI_LinkControl_CycleMaster);
3055 else
3056 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear,
3057 OHCI_LinkControl_CycleMaster);
3058 return 0;
3059 }
3060
3061 /*
3062 * some CSRs are handled by driver.
3063 */
3064 static void
3065 fwohci_csr_init(struct fwohci_softc *sc)
3066 {
3067 int i;
3068 static u_int32_t csr[] = {
3069 CSR_STATE_CLEAR, CSR_STATE_SET, CSR_SB_CYCLE_TIME,
3070 CSR_SB_BUS_TIME, CSR_SB_BUSY_TIMEOUT, CSR_SB_BUS_MANAGER_ID,
3071 CSR_SB_CHANNEL_AVAILABLE_HI, CSR_SB_CHANNEL_AVAILABLE_LO,
3072 CSR_SB_BROADCAST_CHANNEL
3073 };
3074
3075 for (i = 0; i < sizeof(csr) / sizeof(csr[0]); i++) {
3076 fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_QUAD,
3077 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input,
3078 NULL);
3079 fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD,
3080 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input,
3081 NULL);
3082 }
3083 sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] = 31; /*XXX*/
3084 }
3085
3086 static int
3087 fwohci_csr_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3088 {
3089 struct fwohci_pkt res;
3090 u_int32_t reg;
3091
3092 /*
3093 * XXX need to do special functionality other than just r/w...
3094 */
3095 reg = pkt->fp_hdr[2] - CSR_BASE_LO;
3096
3097 if ((reg & 0x03) != 0) {
3098 /* alignment error */
3099 return IEEE1394_RCODE_ADDRESS_ERROR;
3100 }
3101 DPRINTFN(1, ("fwohci_csr_input: CSR[0x%04x]: 0x%08x", reg,
3102 *(u_int32_t *)(&sc->sc_csr[reg])));
3103 if (pkt->fp_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD) {
3104 DPRINTFN(1, (" -> 0x%08x\n",
3105 ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base)));
3106 *(u_int32_t *)&sc->sc_csr[reg] =
3107 ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base);
3108 } else {
3109 DPRINTFN(1, ("\n"));
3110 res.fp_hdr[3] = htonl(*(u_int32_t *)&sc->sc_csr[reg]);
3111 res.fp_iov[0].iov_base = &res.fp_hdr[3];
3112 res.fp_iov[0].iov_len = 4;
3113 res.fp_uio.uio_resid = 4;
3114 res.fp_uio.uio_iovcnt = 1;
3115 fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res);
3116 return -1;
3117 }
3118 return IEEE1394_RCODE_COMPLETE;
3119 }
3120
3121 /*
3122 * Mapping between nodeid and unique ID (EUI-64).
3123 *
3124 * Track old mappings and simply update their devices with the new id's when
3125 * they match an existing EUI. This allows proper renumeration of the bus.
3126 */
3127 static void
3128 fwohci_uid_collect(struct fwohci_softc *sc)
3129 {
3130 int i;
3131 struct fwohci_uidtbl *fu;
3132 struct ieee1394_softc *iea;
3133
3134 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node)
3135 iea->sc1394_node_id = 0xffff;
3136
3137 if (sc->sc_uidtbl != NULL)
3138 free(sc->sc_uidtbl, M_DEVBUF);
3139 sc->sc_uidtbl = malloc(sizeof(*fu) * (sc->sc_rootid + 1), M_DEVBUF,
3140 M_NOWAIT|M_ZERO); /* XXX M_WAITOK requires locks */
3141 if (sc->sc_uidtbl == NULL)
3142 return;
3143
3144 for (i = 0, fu = sc->sc_uidtbl; i <= sc->sc_rootid; i++, fu++) {
3145 if (i == (sc->sc_nodeid & OHCI_NodeId_NodeNumber)) {
3146 memcpy(fu->fu_uid, sc->sc_sc1394.sc1394_guid, 8);
3147 fu->fu_valid = 3;
3148
3149 iea = (struct ieee1394_softc *)sc->sc_sc1394.sc1394_if;
3150 if (iea) {
3151 iea->sc1394_node_id = i;
3152 DPRINTF(("%s: Updating nodeid to %d\n",
3153 iea->sc1394_dev.dv_xname,
3154 iea->sc1394_node_id));
3155 }
3156 } else {
3157 fu->fu_valid = 0;
3158 fwohci_uid_req(sc, i);
3159 }
3160 }
3161 if (sc->sc_rootid == 0)
3162 fwohci_check_nodes(sc);
3163 }
3164
3165 static void
3166 fwohci_uid_req(struct fwohci_softc *sc, int phyid)
3167 {
3168 struct fwohci_pkt pkt;
3169
3170 memset(&pkt, 0, sizeof(pkt));
3171 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3172 pkt.fp_hlen = 12;
3173 pkt.fp_dlen = 0;
3174 pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
3175 (pkt.fp_tcode << 4);
3176 pkt.fp_hdr[1] = ((0xffc0 | phyid) << 16) | CSR_BASE_HI;
3177 pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 12;
3178 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid,
3179 sc->sc_tlabel, 0, fwohci_uid_input, (void *)0);
3180 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3181 fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
3182
3183 pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
3184 (pkt.fp_tcode << 4);
3185 pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 16;
3186 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid,
3187 sc->sc_tlabel, 0, fwohci_uid_input, (void *)1);
3188 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3189 fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
3190 }
3191
3192 static int
3193 fwohci_uid_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *res)
3194 {
3195 struct fwohci_uidtbl *fu;
3196 struct ieee1394_softc *iea;
3197 struct ieee1394_attach_args fwa;
3198 int i, n, done, rcode, found;
3199
3200 found = 0;
3201
3202 n = (res->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
3203 rcode = (res->fp_hdr[1] & 0x0000f000) >> 12;
3204 if (rcode != IEEE1394_RCODE_COMPLETE ||
3205 sc->sc_uidtbl == NULL ||
3206 n > sc->sc_rootid)
3207 return 0;
3208 fu = &sc->sc_uidtbl[n];
3209 if (arg == 0) {
3210 memcpy(fu->fu_uid, res->fp_iov[0].iov_base, 4);
3211 fu->fu_valid |= 0x1;
3212 } else {
3213 memcpy(fu->fu_uid + 4, res->fp_iov[0].iov_base, 4);
3214 fu->fu_valid |= 0x2;
3215 }
3216 #ifdef FW_DEBUG
3217 if (fu->fu_valid == 0x3)
3218 DPRINTFN(1, ("fwohci_uid_input: "
3219 "Node %d, UID %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", n,
3220 fu->fu_uid[0], fu->fu_uid[1], fu->fu_uid[2], fu->fu_uid[3],
3221 fu->fu_uid[4], fu->fu_uid[5], fu->fu_uid[6], fu->fu_uid[7]));
3222 #endif
3223 if (fu->fu_valid == 0x3) {
3224 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node)
3225 if (memcmp(iea->sc1394_guid, fu->fu_uid, 8) == 0) {
3226 found = 1;
3227 iea->sc1394_node_id = n;
3228 DPRINTF(("%s: Updating nodeid to %d\n",
3229 iea->sc1394_dev.dv_xname,
3230 iea->sc1394_node_id));
3231 if (iea->sc1394_callback.sc1394_reset)
3232 iea->sc1394_callback.sc1394_reset(iea,
3233 iea->sc1394_callback.sc1394_resetarg);
3234 break;
3235 }
3236 if (!found) {
3237 strcpy(fwa.name, "fwnode");
3238 memcpy(fwa.uid, fu->fu_uid, 8);
3239 fwa.nodeid = n;
3240 iea = (struct ieee1394_softc *)
3241 config_found_sm_loc(&sc->sc_sc1394.sc1394_dev,
3242 "fwbus", NULL, &fwa,
3243 fwohci_print, fwohci_submatch);
3244 if (iea != NULL)
3245 LIST_INSERT_HEAD(&sc->sc_nodelist, iea,
3246 sc1394_node);
3247 }
3248 }
3249 done = 1;
3250
3251 for (i = 0; i < sc->sc_rootid + 1; i++) {
3252 fu = &sc->sc_uidtbl[i];
3253 if (fu->fu_valid != 0x3) {
3254 done = 0;
3255 break;
3256 }
3257 }
3258 if (done)
3259 fwohci_check_nodes(sc);
3260
3261 return 0;
3262 }
3263
3264 static void
3265 fwohci_check_nodes(struct fwohci_softc *sc)
3266 {
3267 struct device *detach = NULL;
3268 struct ieee1394_softc *iea;
3269
3270 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node) {
3271
3272 /*
3273 * Have to defer detachment until the next
3274 * loop iteration since config_detach
3275 * free's the softc and the loop iterator
3276 * needs data from the softc to move
3277 * forward.
3278 */
3279
3280 if (detach) {
3281 config_detach(detach, 0);
3282 detach = NULL;
3283 }
3284 if (iea->sc1394_node_id == 0xffff) {
3285 detach = (struct device *)iea;
3286 LIST_REMOVE(iea, sc1394_node);
3287 }
3288 }
3289 if (detach)
3290 config_detach(detach, 0);
3291 }
3292
3293 static int
3294 fwohci_uid_lookup(struct fwohci_softc *sc, const u_int8_t *uid)
3295 {
3296 struct fwohci_uidtbl *fu;
3297 int n;
3298 static const u_int8_t bcast[] =
3299 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3300
3301 fu = sc->sc_uidtbl;
3302 if (fu == NULL) {
3303 if (memcmp(uid, bcast, sizeof(bcast)) == 0)
3304 return IEEE1394_BCAST_PHY_ID;
3305 fwohci_uid_collect(sc); /* try to get */
3306 return -1;
3307 }
3308 for (n = 0; n <= sc->sc_rootid; n++, fu++) {
3309 if (fu->fu_valid == 0x3 && memcmp(fu->fu_uid, uid, 8) == 0)
3310 return n;
3311 }
3312 if (memcmp(uid, bcast, sizeof(bcast)) == 0)
3313 return IEEE1394_BCAST_PHY_ID;
3314 for (n = 0, fu = sc->sc_uidtbl; n <= sc->sc_rootid; n++, fu++) {
3315 if (fu->fu_valid != 0x3) {
3316 /*
3317 * XXX: need timer before retransmission
3318 */
3319 fwohci_uid_req(sc, n);
3320 }
3321 }
3322 return -1;
3323 }
3324
3325 /*
3326 * functions to support network interface
3327 */
3328 static int
3329 fwohci_if_inreg(struct device *self, u_int32_t offhi, u_int32_t offlo,
3330 void (*handler)(struct device *, struct mbuf *))
3331 {
3332 struct fwohci_softc *sc = (struct fwohci_softc *)self;
3333
3334 fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_BLOCK, offhi, offlo, 0,
3335 handler ? fwohci_if_input : NULL, handler);
3336 fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA,
3337 (sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] & IEEE1394_ISOCH_MASK) |
3338 OHCI_ASYNC_STREAM,
3339 1 << IEEE1394_TAG_GASP, 0,
3340 handler ? fwohci_if_input : NULL, handler);
3341 return 0;
3342 }
3343
3344 static int
3345 fwohci_if_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3346 {
3347 int n, len;
3348 struct mbuf *m;
3349 struct iovec *iov;
3350 void (*handler)(struct device *, struct mbuf *) = arg;
3351
3352 #ifdef FW_DEBUG
3353 int i;
3354 DPRINTFN(1, ("fwohci_if_input: tcode=0x%x, dlen=%d", pkt->fp_tcode,
3355 pkt->fp_dlen));
3356 for (i = 0; i < pkt->fp_hlen/4; i++)
3357 DPRINTFN(2, ("%s%08x", i?" ":"\n ", pkt->fp_hdr[i]));
3358 DPRINTFN(2, ("$"));
3359 for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){
3360 iov = &pkt->fp_iov[n];
3361 for (i = 0; i < iov->iov_len; i++)
3362 DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ",
3363 ((u_int8_t *)iov->iov_base)[i]));
3364 DPRINTFN(2, ("$"));
3365 }
3366 DPRINTFN(1, ("\n"));
3367 #endif /* FW_DEBUG */
3368 len = pkt->fp_dlen;
3369 MGETHDR(m, M_DONTWAIT, MT_DATA);
3370 if (m == NULL)
3371 return IEEE1394_RCODE_COMPLETE;
3372 m->m_len = 16;
3373 if (len + m->m_len > MHLEN) {
3374 MCLGET(m, M_DONTWAIT);
3375 if ((m->m_flags & M_EXT) == 0) {
3376 m_freem(m);
3377 return IEEE1394_RCODE_COMPLETE;
3378 }
3379 }
3380 n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
3381 if (sc->sc_uidtbl == NULL || n > sc->sc_rootid ||
3382 sc->sc_uidtbl[n].fu_valid != 0x3) {
3383 printf("%s: packet from unknown node: phy id %d\n",
3384 sc->sc_sc1394.sc1394_dev.dv_xname, n);
3385 m_freem(m);
3386 fwohci_uid_req(sc, n);
3387 return IEEE1394_RCODE_COMPLETE;
3388 }
3389 memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8);
3390 if (pkt->fp_tcode == IEEE1394_TCODE_STREAM_DATA) {
3391 m->m_flags |= M_BCAST;
3392 mtod(m, u_int32_t *)[2] = mtod(m, u_int32_t *)[3] = 0;
3393 } else {
3394 mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]);
3395 mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]);
3396 }
3397 mtod(m, u_int8_t *)[8] = n; /*XXX: node id for debug */
3398 mtod(m, u_int8_t *)[9] =
3399 (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) &
3400 ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1);
3401
3402 m->m_pkthdr.rcvif = NULL; /* set in child */
3403 m->m_pkthdr.len = len + m->m_len;
3404 /*
3405 * We may use receive buffer by external mbuf instead of copy here.
3406 * But asynchronous receive buffer must be operate in buffer fill
3407 * mode, so that each receive buffer will shared by multiple mbufs.
3408 * If upper layer doesn't free mbuf soon, e.g. application program
3409 * is suspended, buffer must be reallocated.
3410 * Isochronous buffer must be operate in packet buffer mode, and
3411 * it is easy to map receive buffer to external mbuf. But it is
3412 * used for broadcast/multicast only, and is expected not so
3413 * performance sensitive for now.
3414 * XXX: The performance may be important for multicast case,
3415 * so we should revisit here later.
3416 * -- onoe
3417 */
3418 n = 0;
3419 iov = pkt->fp_uio.uio_iov;
3420 while (len > 0) {
3421 memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base,
3422 iov->iov_len);
3423 m->m_len += iov->iov_len;
3424 len -= iov->iov_len;
3425 iov++;
3426 }
3427 (*handler)(sc->sc_sc1394.sc1394_if, m);
3428 return IEEE1394_RCODE_COMPLETE;
3429 }
3430
3431 static int
3432 fwohci_if_input_iso(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3433 {
3434 int n, len;
3435 int chan, tag;
3436 struct mbuf *m;
3437 struct iovec *iov;
3438 void (*handler)(struct device *, struct mbuf *) = arg;
3439 #ifdef FW_DEBUG
3440 int i;
3441 #endif
3442
3443 chan = (pkt->fp_hdr[0] & 0x00003f00) >> 8;
3444 tag = (pkt->fp_hdr[0] & 0x0000c000) >> 14;
3445 #ifdef FW_DEBUG
3446 DPRINTFN(1, ("fwohci_if_input_iso: "
3447 "tcode=0x%x, chan=%d, tag=%x, dlen=%d",
3448 pkt->fp_tcode, chan, tag, pkt->fp_dlen));
3449 for (i = 0; i < pkt->fp_hlen/4; i++)
3450 DPRINTFN(2, ("%s%08x", i?" ":"\n\t", pkt->fp_hdr[i]));
3451 DPRINTFN(2, ("$"));
3452 for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){
3453 iov = &pkt->fp_iov[n];
3454 for (i = 0; i < iov->iov_len; i++)
3455 DPRINTFN(2, ("%s%02x",
3456 (i%32)?((i%4)?"":" "):"\n\t",
3457 ((u_int8_t *)iov->iov_base)[i]));
3458 DPRINTFN(2, ("$"));
3459 }
3460 DPRINTFN(2, ("\n"));
3461 #endif /* FW_DEBUG */
3462 len = pkt->fp_dlen;
3463 MGETHDR(m, M_DONTWAIT, MT_DATA);
3464 if (m == NULL)
3465 return IEEE1394_RCODE_COMPLETE;
3466 m->m_len = 16;
3467 if (m->m_len + len > MHLEN) {
3468 MCLGET(m, M_DONTWAIT);
3469 if ((m->m_flags & M_EXT) == 0) {
3470 m_freem(m);
3471 return IEEE1394_RCODE_COMPLETE;
3472 }
3473 }
3474
3475 m->m_flags |= M_BCAST;
3476
3477 if (tag == IEEE1394_TAG_GASP) {
3478 n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
3479 if (sc->sc_uidtbl == NULL || n > sc->sc_rootid ||
3480 sc->sc_uidtbl[n].fu_valid != 0x3) {
3481 printf("%s: packet from unknown node: phy id %d\n",
3482 sc->sc_sc1394.sc1394_dev.dv_xname, n);
3483 m_freem(m);
3484 return IEEE1394_RCODE_COMPLETE;
3485 }
3486 memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8);
3487 mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]);
3488 mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]);
3489 mtod(m, u_int8_t *)[8] = n; /*XXX: node id for debug */
3490 mtod(m, u_int8_t *)[9] =
3491 (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) &
3492 ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1);
3493 }
3494 mtod(m, u_int8_t *)[14] = chan;
3495 mtod(m, u_int8_t *)[15] = tag;
3496
3497
3498 m->m_pkthdr.rcvif = NULL; /* set in child */
3499 m->m_pkthdr.len = len + m->m_len;
3500 /*
3501 * We may use receive buffer by external mbuf instead of copy here.
3502 * But asynchronous receive buffer must be operate in buffer fill
3503 * mode, so that each receive buffer will shared by multiple mbufs.
3504 * If upper layer doesn't free mbuf soon, e.g. application program
3505 * is suspended, buffer must be reallocated.
3506 * Isochronous buffer must be operate in packet buffer mode, and
3507 * it is easy to map receive buffer to external mbuf. But it is
3508 * used for broadcast/multicast only, and is expected not so
3509 * performance sensitive for now.
3510 * XXX: The performance may be important for multicast case,
3511 * so we should revisit here later.
3512 * -- onoe
3513 */
3514 n = 0;
3515 iov = pkt->fp_uio.uio_iov;
3516 while (len > 0) {
3517 memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base,
3518 iov->iov_len);
3519 m->m_len += iov->iov_len;
3520 len -= iov->iov_len;
3521 iov++;
3522 }
3523 (*handler)(sc->sc_sc1394.sc1394_if, m);
3524 return IEEE1394_RCODE_COMPLETE;
3525 }
3526
3527
3528
3529 static int
3530 fwohci_if_output(struct device *self, struct mbuf *m0,
3531 void (*callback)(struct device *, struct mbuf *))
3532 {
3533 struct fwohci_softc *sc = (struct fwohci_softc *)self;
3534 struct fwohci_pkt pkt;
3535 u_int8_t *p;
3536 int n = 0, error, spd, hdrlen, maxrec; /* XXX: gcc */
3537 #ifdef FW_DEBUG
3538 struct mbuf *m;
3539 #endif
3540
3541 p = mtod(m0, u_int8_t *);
3542 if (m0->m_flags & (M_BCAST | M_MCAST)) {
3543 spd = IEEE1394_SPD_S100; /*XXX*/
3544 maxrec = 512; /*XXX*/
3545 hdrlen = 8;
3546 } else {
3547 n = fwohci_uid_lookup(sc, p);
3548 if (n < 0) {
3549 printf("%s: nodeid unknown:"
3550 " %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
3551 sc->sc_sc1394.sc1394_dev.dv_xname,
3552 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
3553 error = EHOSTUNREACH;
3554 goto end;
3555 }
3556 if (n == IEEE1394_BCAST_PHY_ID) {
3557 printf("%s: broadcast with !M_MCAST\n",
3558 sc->sc_sc1394.sc1394_dev.dv_xname);
3559 #ifdef FW_DEBUG
3560 DPRINTFN(2, ("packet:"));
3561 for (m = m0; m != NULL; m = m->m_next) {
3562 for (n = 0; n < m->m_len; n++)
3563 DPRINTFN(2, ("%s%02x", (n%32)?
3564 ((n%4)?"":" "):"\n ",
3565 mtod(m, u_int8_t *)[n]));
3566 DPRINTFN(2, ("$"));
3567 }
3568 DPRINTFN(2, ("\n"));
3569 #endif
3570 error = EHOSTUNREACH;
3571 goto end;
3572 }
3573 maxrec = 2 << p[8];
3574 spd = p[9];
3575 hdrlen = 0;
3576 }
3577 if (spd > sc->sc_sc1394.sc1394_link_speed) {
3578 DPRINTF(("fwohci_if_output: spd (%d) is faster than %d\n",
3579 spd, sc->sc_sc1394.sc1394_link_speed));
3580 spd = sc->sc_sc1394.sc1394_link_speed;
3581 }
3582 if (maxrec > (512 << spd)) {
3583 DPRINTF(("fwohci_if_output: maxrec (%d) is larger for spd (%d)"
3584 "\n", maxrec, spd));
3585 maxrec = 512 << spd;
3586 }
3587 while (maxrec > sc->sc_sc1394.sc1394_max_receive) {
3588 DPRINTF(("fwohci_if_output: maxrec (%d) is larger than"
3589 " %d\n", maxrec, sc->sc_sc1394.sc1394_max_receive));
3590 maxrec >>= 1;
3591 }
3592 if (maxrec < 512) {
3593 DPRINTF(("fwohci_if_output: maxrec (%d) is smaller than "
3594 "minimum\n", maxrec));
3595 maxrec = 512;
3596 }
3597
3598 m_adj(m0, 16 - hdrlen);
3599 if (m0->m_pkthdr.len > maxrec) {
3600 DPRINTF(("fwohci_if_output: packet too big: hdr %d, pktlen "
3601 "%d, maxrec %d\n", hdrlen, m0->m_pkthdr.len, maxrec));
3602 error = E2BIG; /*XXX*/
3603 goto end;
3604 }
3605
3606 memset(&pkt, 0, sizeof(pkt));
3607 pkt.fp_uio.uio_iov = pkt.fp_iov;
3608 pkt.fp_uio.uio_segflg = UIO_SYSSPACE;
3609 pkt.fp_uio.uio_rw = UIO_WRITE;
3610 if (m0->m_flags & (M_BCAST | M_MCAST)) {
3611 /* construct GASP header */
3612 p = mtod(m0, u_int8_t *);
3613 p[0] = sc->sc_nodeid >> 8;
3614 p[1] = sc->sc_nodeid & 0xff;
3615 p[2] = 0x00; p[3] = 0x00; p[4] = 0x5e;
3616 p[5] = 0x00; p[6] = 0x00; p[7] = 0x01;
3617 pkt.fp_tcode = IEEE1394_TCODE_STREAM_DATA;
3618 pkt.fp_hlen = 8;
3619 pkt.fp_hdr[0] = (spd << 16) | (IEEE1394_TAG_GASP << 14) |
3620 ((sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] &
3621 OHCI_NodeId_NodeNumber) << 8);
3622 pkt.fp_hdr[1] = m0->m_pkthdr.len << 16;
3623 } else {
3624 pkt.fp_tcode = IEEE1394_TCODE_WRITE_REQ_BLOCK;
3625 pkt.fp_hlen = 16;
3626 pkt.fp_hdr[0] = 0x00800100 | (sc->sc_tlabel << 10) |
3627 (spd << 16);
3628 pkt.fp_hdr[1] =
3629 (((sc->sc_nodeid & OHCI_NodeId_BusNumber) | n) << 16) |
3630 (p[10] << 8) | p[11];
3631 pkt.fp_hdr[2] = (p[12]<<24) | (p[13]<<16) | (p[14]<<8) | p[15];
3632 pkt.fp_hdr[3] = m0->m_pkthdr.len << 16;
3633 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3634 }
3635 pkt.fp_hdr[0] |= (pkt.fp_tcode << 4);
3636 pkt.fp_dlen = m0->m_pkthdr.len;
3637 pkt.fp_m = m0;
3638 pkt.fp_callback = callback;
3639 error = fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
3640 m0 = pkt.fp_m;
3641 end:
3642 if (m0 != NULL) {
3643 if (callback)
3644 (*callback)(sc->sc_sc1394.sc1394_if, m0);
3645 else
3646 m_freem(m0);
3647 }
3648 return error;
3649 }
3650
3651 /*
3652 * High level routines to provide abstraction to attaching layers to
3653 * send/receive data.
3654 */
3655
3656 /*
3657 * These break down into 4 routines as follows:
3658 *
3659 * int fwohci_read(struct ieee1394_abuf *)
3660 *
3661 * This routine will attempt to read a region from the requested node.
3662 * A callback must be provided which will be called when either the completed
3663 * read is done or an unrecoverable error occurs. This is mainly a convenience
3664 * routine since it will encapsulate retrying a region as quadlet vs. block
3665 * reads and recombining all the returned data. This could also be done with a
3666 * series of write/inreg's for each packet sent.
3667 *
3668 * int fwohci_write(struct ieee1394_abuf *)
3669 *
3670 * The work horse main entry point for putting packets on the bus. This is the
3671 * generalized interface for fwnode/etc code to put packets out onto the bus.
3672 * It accepts all standard ieee1394 tcodes (XXX: only a few today) and
3673 * optionally will callback via a func pointer to the calling code with the
3674 * resulting ACK code from the packet. If the ACK code is to be ignored (i.e.
3675 * no cb) then the write routine will take care of free'ing the abuf since the
3676 * fwnode/etc code won't have any knowledge of when to do this. This allows for
3677 * simple one-off packets to be sent from the upper-level code without worrying
3678 * about a callback for cleanup.
3679 *
3680 * int fwohci_inreg(struct ieee1394_abuf *, int)
3681 *
3682 * This is very simple. It evals the abuf passed in and registers an internal
3683 * handler as the callback for packets received for that operation.
3684 * The integer argument specifies whether on a block read/write operation to
3685 * allow sub-regions to be read/written (in block form) as well.
3686 *
3687 * XXX: This whole structure needs to be redone as a list of regions and
3688 * operations allowed on those regions.
3689 *
3690 * int fwohci_unreg(struct ieee1394_abuf *, int)
3691 *
3692 * This simply unregisters the respective callback done via inreg for items
3693 * which only need to register an area for a one-time operation (like a status
3694 * buffer a remote node will write to when the current operation is done). The
3695 * int argument specifies the same behavior as inreg, except in reverse (i.e.
3696 * it unregisters).
3697 */
3698
3699 static int
3700 fwohci_read(struct ieee1394_abuf *ab)
3701 {
3702 struct fwohci_pkt pkt;
3703 struct ieee1394_softc *sc = ab->ab_req;
3704 struct fwohci_softc *psc =
3705 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
3706 struct fwohci_cb *fcb;
3707 u_int32_t high, lo;
3708 int rv, tcode;
3709
3710 /* Have to have a callback when reading. */
3711 if (ab->ab_cb == NULL)
3712 return -1;
3713
3714 fcb = malloc(sizeof(struct fwohci_cb), M_DEVBUF, M_WAITOK);
3715 fcb->ab = ab;
3716 fcb->count = 0;
3717 fcb->abuf_valid = 1;
3718
3719 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3720 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3721
3722 memset(&pkt, 0, sizeof(pkt));
3723 pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3724 pkt.fp_hdr[2] = lo;
3725 pkt.fp_dlen = 0;
3726
3727 if (ab->ab_length == 4) {
3728 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3729 tcode = IEEE1394_TCODE_READ_RESP_QUAD;
3730 pkt.fp_hlen = 12;
3731 } else {
3732 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_BLOCK;
3733 pkt.fp_hlen = 16;
3734 tcode = IEEE1394_TCODE_READ_RESP_BLOCK;
3735 pkt.fp_hdr[3] = (ab->ab_length << 16);
3736 }
3737 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3738 (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4);
3739
3740 pkt.fp_statusarg = fcb;
3741 pkt.fp_statuscb = fwohci_read_resp;
3742
3743 rv = fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id,
3744 psc->sc_tlabel, 0, fwohci_read_resp, fcb);
3745 if (rv)
3746 return rv;
3747 rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt);
3748 if (rv)
3749 fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id,
3750 psc->sc_tlabel, 0, NULL, NULL);
3751 psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f;
3752 fcb->count = 1;
3753 return rv;
3754 }
3755
3756 static int
3757 fwohci_write(struct ieee1394_abuf *ab)
3758 {
3759 struct fwohci_pkt pkt;
3760 struct ieee1394_softc *sc = ab->ab_req;
3761 struct fwohci_softc *psc =
3762 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
3763 u_int32_t high, lo;
3764 int rv;
3765
3766 if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) {
3767 if (ab->ab_length > IEEE1394_MAX_REC(sc->sc1394_max_receive)) {
3768 DPRINTF(("Packet too large: %d\n", ab->ab_length));
3769 return E2BIG;
3770 }
3771 }
3772
3773 if (ab->ab_length >
3774 IEEE1394_MAX_ASYNCH_FOR_SPEED(sc->sc1394_link_speed)) {
3775 DPRINTF(("Packet too large: %d\n", ab->ab_length));
3776 return E2BIG;
3777 }
3778
3779 if (ab->ab_data && ab->ab_uio)
3780 panic("Can't call with uio and data set");
3781 if ((ab->ab_data == NULL) && (ab->ab_uio == NULL))
3782 panic("One of either ab_data or ab_uio must be set");
3783
3784 memset(&pkt, 0, sizeof(pkt));
3785
3786 pkt.fp_tcode = ab->ab_tcode;
3787 if (ab->ab_data) {
3788 pkt.fp_uio.uio_iov = pkt.fp_iov;
3789 pkt.fp_uio.uio_segflg = UIO_SYSSPACE;
3790 pkt.fp_uio.uio_rw = UIO_WRITE;
3791 } else
3792 memcpy(&pkt.fp_uio, ab->ab_uio, sizeof(struct uio));
3793
3794 pkt.fp_statusarg = ab;
3795 pkt.fp_statuscb = fwohci_write_ack;
3796
3797 switch (ab->ab_tcode) {
3798 case IEEE1394_TCODE_WRITE_RESP:
3799 pkt.fp_hlen = 12;
3800 case IEEE1394_TCODE_READ_RESP_QUAD:
3801 case IEEE1394_TCODE_READ_RESP_BLOCK:
3802 if (!pkt.fp_hlen)
3803 pkt.fp_hlen = 16;
3804 high = ab->ab_retlen;
3805 ab->ab_retlen = 0;
3806 lo = 0;
3807 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3808 (ab->ab_tlabel << 10) | (pkt.fp_tcode << 4);
3809 break;
3810 default:
3811 pkt.fp_hlen = 16;
3812 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3813 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3814 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3815 (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4);
3816 psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f;
3817 break;
3818 }
3819
3820 pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3821 pkt.fp_hdr[2] = lo;
3822 if (pkt.fp_hlen == 16) {
3823 if (ab->ab_length == 4) {
3824 pkt.fp_hdr[3] = ab->ab_data[0];
3825 pkt.fp_dlen = 0;
3826 } else {
3827 pkt.fp_hdr[3] = (ab->ab_length << 16);
3828 pkt.fp_dlen = ab->ab_length;
3829 if (ab->ab_data) {
3830 pkt.fp_uio.uio_iovcnt = 1;
3831 pkt.fp_uio.uio_resid = ab->ab_length;
3832 pkt.fp_iov[0].iov_base = ab->ab_data;
3833 pkt.fp_iov[0].iov_len = ab->ab_length;
3834 }
3835 }
3836 }
3837 switch (ab->ab_tcode) {
3838 case IEEE1394_TCODE_WRITE_RESP:
3839 case IEEE1394_TCODE_READ_RESP_QUAD:
3840 case IEEE1394_TCODE_READ_RESP_BLOCK:
3841 rv = fwohci_at_output(psc, psc->sc_ctx_atrs, &pkt);
3842 break;
3843 default:
3844 rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt);
3845 break;
3846 }
3847 return rv;
3848 }
3849
3850 static int
3851 fwohci_read_resp(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
3852 {
3853 struct fwohci_cb *fcb = arg;
3854 struct ieee1394_abuf *ab = fcb->ab;
3855 struct fwohci_pkt newpkt;
3856 u_int32_t *cur, high, lo;
3857 int i, tcode, rcode, status, rv;
3858
3859 /*
3860 * Both the ACK handling and normal response callbacks are handled here.
3861 * The main reason for this is the various error conditions that can
3862 * occur trying to block read some areas and the ways that gets reported
3863 * back to calling station. This is a variety of ACK codes, responses,
3864 * etc which makes it much more difficult to process if both aren't
3865 * handled here.
3866 */
3867
3868 /* Check for status packet. */
3869
3870 if (pkt->fp_tcode == -1) {
3871 status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK;
3872 rcode = -1;
3873 tcode = (pkt->fp_hdr[0] >> 4) & 0xf;
3874 if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
3875 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))
3876 DPRINTFN(2, ("Got status packet: 0x%02x\n",
3877 (unsigned int)status));
3878 fcb->count--;
3879
3880 /*
3881 * Got all the ack's back and the buffer is invalid (i.e. the
3882 * callback has been called. Clean up.
3883 */
3884
3885 if (fcb->abuf_valid == 0) {
3886 if (fcb->count == 0)
3887 free(fcb, M_DEVBUF);
3888 return IEEE1394_RCODE_COMPLETE;
3889 }
3890 } else {
3891 status = -1;
3892 tcode = pkt->fp_tcode;
3893 rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12;
3894 }
3895
3896 /*
3897 * Some area's (like the config rom want to be read as quadlets only.
3898 *
3899 * The current ideas to try are:
3900 *
3901 * Got an ACK_TYPE_ERROR on a block read.
3902 *
3903 * Got either RCODE_TYPE or RCODE_ADDRESS errors in a block read
3904 * response.
3905 *
3906 * In all cases construct a new packet for a quadlet read and let
3907 * mutli_resp handle the iteration over the space.
3908 */
3909
3910 if (((status == OHCI_CTXCTL_EVENT_ACK_TYPE_ERROR) &&
3911 (tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) ||
3912 (((rcode == IEEE1394_RCODE_TYPE_ERROR) ||
3913 (rcode == IEEE1394_RCODE_ADDRESS_ERROR)) &&
3914 (tcode == IEEE1394_TCODE_READ_RESP_BLOCK))) {
3915
3916 /* Read the area in quadlet chunks (internally track this). */
3917
3918 memset(&newpkt, 0, sizeof(newpkt));
3919
3920 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3921 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3922
3923 newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3924 newpkt.fp_hlen = 12;
3925 newpkt.fp_dlen = 0;
3926 newpkt.fp_hdr[1] =
3927 ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3928 newpkt.fp_hdr[2] = lo;
3929 newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
3930 (newpkt.fp_tcode << 4);
3931
3932 rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
3933 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0,
3934 fwohci_read_multi_resp, fcb);
3935 if (rv) {
3936 (*ab->ab_cb)(ab, -1);
3937 goto cleanup;
3938 }
3939 newpkt.fp_statusarg = fcb;
3940 newpkt.fp_statuscb = fwohci_read_resp;
3941 rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt);
3942 if (rv) {
3943 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
3944 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0, NULL,
3945 NULL);
3946 (*ab->ab_cb)(ab, -1);
3947 goto cleanup;
3948 }
3949 fcb->count++;
3950 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
3951 return IEEE1394_RCODE_COMPLETE;
3952 } else if ((rcode != -1) || ((status != -1) &&
3953 (status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
3954 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))) {
3955
3956 /*
3957 * Recombine all the iov data into 1 chunk for higher
3958 * level code.
3959 */
3960
3961 if (rcode != -1) {
3962 cur = ab->ab_data;
3963 for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) {
3964 /*
3965 * Make sure and don't exceed the buffer
3966 * allocated for return.
3967 */
3968 if ((ab->ab_retlen + pkt->fp_iov[i].iov_len) >
3969 ab->ab_length) {
3970 memcpy(cur, pkt->fp_iov[i].iov_base,
3971 (ab->ab_length - ab->ab_retlen));
3972 ab->ab_retlen = ab->ab_length;
3973 break;
3974 }
3975 memcpy(cur, pkt->fp_iov[i].iov_base,
3976 pkt->fp_iov[i].iov_len);
3977 cur += pkt->fp_iov[i].iov_len;
3978 ab->ab_retlen += pkt->fp_iov[i].iov_len;
3979 }
3980 }
3981 if (status != -1)
3982 /* XXX: Need a complete tlabel interface. */
3983 for (i = 0; i < 64; i++)
3984 fwohci_handler_set(sc,
3985 IEEE1394_TCODE_READ_RESP_QUAD,
3986 ab->ab_req->sc1394_node_id, i, 0, NULL,
3987 NULL);
3988 (*ab->ab_cb)(ab, rcode);
3989 goto cleanup;
3990 } else
3991 /* Good ack packet. */
3992 return IEEE1394_RCODE_COMPLETE;
3993
3994 /* Can't get here unless ab->ab_cb has been called. */
3995
3996 cleanup:
3997 fcb->abuf_valid = 0;
3998 if (fcb->count == 0)
3999 free(fcb, M_DEVBUF);
4000 return IEEE1394_RCODE_COMPLETE;
4001 }
4002
4003 static int
4004 fwohci_read_multi_resp(struct fwohci_softc *sc, void *arg,
4005 struct fwohci_pkt *pkt)
4006 {
4007 struct fwohci_cb *fcb = arg;
4008 struct ieee1394_abuf *ab = fcb->ab;
4009 struct fwohci_pkt newpkt;
4010 u_int32_t high, lo;
4011 int rcode, rv;
4012
4013 /*
4014 * Bad return codes from the wire, just return what's already in the
4015 * buf.
4016 */
4017
4018 /* Make sure a response packet didn't arrive after a bad ACK. */
4019 if (fcb->abuf_valid == 0)
4020 return IEEE1394_RCODE_COMPLETE;
4021
4022 rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12;
4023
4024 if (rcode) {
4025 (*ab->ab_cb)(ab, rcode);
4026 goto cleanup;
4027 }
4028
4029 if ((ab->ab_retlen + pkt->fp_iov[0].iov_len) > ab->ab_length) {
4030 memcpy(((char *)ab->ab_data + ab->ab_retlen),
4031 pkt->fp_iov[0].iov_base, (ab->ab_length - ab->ab_retlen));
4032 ab->ab_retlen = ab->ab_length;
4033 } else {
4034 memcpy(((char *)ab->ab_data + ab->ab_retlen),
4035 pkt->fp_iov[0].iov_base, 4);
4036 ab->ab_retlen += 4;
4037 }
4038 /* Still more, loop and read 4 more bytes. */
4039 if (ab->ab_retlen < ab->ab_length) {
4040 memset(&newpkt, 0, sizeof(newpkt));
4041
4042 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
4043 lo = (ab->ab_addr & 0x00000000ffffffffULL) + ab->ab_retlen;
4044
4045 newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
4046 newpkt.fp_hlen = 12;
4047 newpkt.fp_dlen = 0;
4048 newpkt.fp_hdr[1] =
4049 ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
4050 newpkt.fp_hdr[2] = lo;
4051 newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
4052 (newpkt.fp_tcode << 4);
4053
4054 newpkt.fp_statusarg = fcb;
4055 newpkt.fp_statuscb = fwohci_read_resp;
4056
4057 /*
4058 * Bad return code. Just give up and return what's
4059 * come in now.
4060 */
4061 rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
4062 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0,
4063 fwohci_read_multi_resp, fcb);
4064 if (rv)
4065 (*ab->ab_cb)(ab, -1);
4066 else {
4067 rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt);
4068 if (rv) {
4069 fwohci_handler_set(sc,
4070 IEEE1394_TCODE_READ_RESP_QUAD,
4071 ab->ab_req->sc1394_node_id, sc->sc_tlabel,
4072 0, NULL, NULL);
4073 (*ab->ab_cb)(ab, -1);
4074 } else {
4075 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
4076 fcb->count++;
4077 return IEEE1394_RCODE_COMPLETE;
4078 }
4079 }
4080 } else
4081 (*ab->ab_cb)(ab, IEEE1394_RCODE_COMPLETE);
4082
4083 cleanup:
4084 /* Can't get here unless ab_cb has been called. */
4085 fcb->abuf_valid = 0;
4086 if (fcb->count == 0)
4087 free(fcb, M_DEVBUF);
4088 return IEEE1394_RCODE_COMPLETE;
4089 }
4090
4091 static int
4092 fwohci_write_ack(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
4093 {
4094 struct ieee1394_abuf *ab = arg;
4095 u_int16_t status;
4096
4097
4098 status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK;
4099 if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
4100 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))
4101 DPRINTF(("Got status packet: 0x%02x\n",
4102 (unsigned int)status));
4103
4104 /* No callback means this level should free the buffers. */
4105 if (ab->ab_cb)
4106 (*ab->ab_cb)(ab, status);
4107 else {
4108 if (ab->ab_data)
4109 free(ab->ab_data, M_1394DATA);
4110 free(ab, M_1394DATA);
4111 }
4112 return IEEE1394_RCODE_COMPLETE;
4113 }
4114
4115 static int
4116 fwohci_inreg(struct ieee1394_abuf *ab, int allow)
4117 {
4118 struct ieee1394_softc *sc = ab->ab_req;
4119 struct fwohci_softc *psc =
4120 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
4121 u_int32_t high, lo;
4122 int rv;
4123
4124 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
4125 lo = (ab->ab_addr & 0x00000000ffffffffULL);
4126
4127 rv = 0;
4128 switch (ab->ab_tcode) {
4129 case IEEE1394_TCODE_READ_REQ_QUAD:
4130 case IEEE1394_TCODE_WRITE_REQ_QUAD:
4131 if (ab->ab_cb)
4132 rv = fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0,
4133 fwohci_parse_input, ab);
4134 else
4135 fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0, NULL,
4136 NULL);
4137 break;
4138 case IEEE1394_TCODE_READ_REQ_BLOCK:
4139 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
4140 if (allow) {
4141 if (ab->ab_cb) {
4142 rv = fwohci_handler_set(psc, ab->ab_tcode,
4143 high, lo, ab->ab_length,
4144 fwohci_parse_input, ab);
4145 if (rv)
4146 fwohci_handler_set(psc, ab->ab_tcode,
4147 high, lo, ab->ab_length, NULL,
4148 NULL);
4149 ab->ab_subok = 1;
4150 } else
4151 fwohci_handler_set(psc, ab->ab_tcode, high, lo,
4152 ab->ab_length, NULL, NULL);
4153 } else {
4154 if (ab->ab_cb)
4155 rv = fwohci_handler_set(psc, ab->ab_tcode, high,
4156 lo, 0, fwohci_parse_input, ab);
4157 else
4158 fwohci_handler_set(psc, ab->ab_tcode, high, lo,
4159 0, NULL, NULL);
4160 }
4161 break;
4162 default:
4163 DPRINTF(("Invalid registration tcode: %d\n", ab->ab_tcode));
4164 return -1;
4165 break;
4166 }
4167 return rv;
4168 }
4169
4170 static int
4171 fwohci_unreg(struct ieee1394_abuf *ab, int allow)
4172 {
4173 void *save;
4174 int rv;
4175
4176 save = ab->ab_cb;
4177 ab->ab_cb = NULL;
4178 rv = fwohci_inreg(ab, allow);
4179 ab->ab_cb = save;
4180 return rv;
4181 }
4182
4183 static int
4184 fwohci_parse_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
4185 {
4186 struct ieee1394_abuf *ab = (struct ieee1394_abuf *)arg;
4187 u_int64_t addr;
4188 u_int8_t *cur;
4189 int i, count, ret;
4190
4191 ab->ab_tcode = (pkt->fp_hdr[0] >> 4) & 0xf;
4192 ab->ab_tlabel = (pkt->fp_hdr[0] >> 10) & 0x3f;
4193 addr = (((u_int64_t)(pkt->fp_hdr[1] & 0xffff) << 32) | pkt->fp_hdr[2]);
4194
4195 /* Make sure it's always 0 in case this gets reused multiple times. */
4196 ab->ab_retlen = 0;
4197
4198 switch (ab->ab_tcode) {
4199 case IEEE1394_TCODE_READ_REQ_QUAD:
4200 ab->ab_retlen = 4;
4201 /* Response's (if required) will come from callback code */
4202 ret = -1;
4203 break;
4204 case IEEE1394_TCODE_READ_REQ_BLOCK:
4205 ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff;
4206 if (ab->ab_subok) {
4207 if ((addr + ab->ab_retlen) >
4208 (ab->ab_addr + ab->ab_length))
4209 return IEEE1394_RCODE_ADDRESS_ERROR;
4210 } else
4211 if (ab->ab_retlen != ab->ab_length)
4212 return IEEE1394_RCODE_ADDRESS_ERROR;
4213 /* Response's (if required) will come from callback code */
4214 ret = -1;
4215 break;
4216 case IEEE1394_TCODE_WRITE_REQ_QUAD:
4217 ab->ab_retlen = 4;
4218 /* Fall through. */
4219
4220 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
4221 if (!ab->ab_retlen)
4222 ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff;
4223 if (ab->ab_subok) {
4224 if ((addr + ab->ab_retlen) >
4225 (ab->ab_addr + ab->ab_length))
4226 return IEEE1394_RCODE_ADDRESS_ERROR;
4227 } else
4228 if (ab->ab_retlen > ab->ab_length)
4229 return IEEE1394_RCODE_ADDRESS_ERROR;
4230
4231 if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD)
4232 ab->ab_data[0] = pkt->fp_hdr[3];
4233 else {
4234 count = 0;
4235 cur = (u_int8_t *)ab->ab_data + (addr - ab->ab_addr);
4236 for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) {
4237 memcpy(cur, pkt->fp_iov[i].iov_base,
4238 pkt->fp_iov[i].iov_len);
4239 cur += pkt->fp_iov[i].iov_len;
4240 count += pkt->fp_iov[i].iov_len;
4241 }
4242 if (ab->ab_retlen != count)
4243 panic("Packet claims %d length "
4244 "but only %d bytes returned\n",
4245 ab->ab_retlen, count);
4246 }
4247 ret = IEEE1394_RCODE_COMPLETE;
4248 break;
4249 default:
4250 panic("Got a callback for a tcode that wasn't requested: %d",
4251 ab->ab_tcode);
4252 break;
4253 }
4254 if (ab->ab_cb) {
4255 ab->ab_retaddr = addr;
4256 ab->ab_cb(ab, IEEE1394_RCODE_COMPLETE);
4257 }
4258 return ret;
4259 }
4260
4261 static int
4262 fwohci_submatch(struct device *parent, struct cfdata *cf,
4263 const locdesc_t *ldesc, void *aux)
4264 {
4265 struct ieee1394_attach_args *fwa = aux;
4266
4267 /* Both halves must be filled in for a match. */
4268 if ((cf->fwbuscf_idhi == FWBUS_UNK_IDHI &&
4269 cf->fwbuscf_idlo == FWBUS_UNK_IDLO) ||
4270 (cf->fwbuscf_idhi == ntohl(*((u_int32_t *)&fwa->uid[0])) &&
4271 cf->fwbuscf_idlo == ntohl(*((u_int32_t *)&fwa->uid[4]))))
4272 return (config_match(parent, cf, aux));
4273 return 0;
4274 }
4275
4276 int
4277 fwohci_detach(struct fwohci_softc *sc, int flags)
4278 {
4279 int rv = 0;
4280
4281 if (sc->sc_sc1394.sc1394_if != NULL)
4282 rv = config_detach(sc->sc_sc1394.sc1394_if, flags);
4283 if (rv != 0)
4284 return (rv);
4285
4286 callout_stop(&sc->sc_selfid_callout);
4287
4288 if (sc->sc_powerhook != NULL)
4289 powerhook_disestablish(sc->sc_powerhook);
4290 if (sc->sc_shutdownhook != NULL)
4291 shutdownhook_disestablish(sc->sc_shutdownhook);
4292
4293 return (rv);
4294 }
4295
4296 int
4297 fwohci_activate(struct device *self, enum devact act)
4298 {
4299 struct fwohci_softc *sc = (struct fwohci_softc *)self;
4300 int s, rv = 0;
4301
4302 s = splhigh();
4303 switch (act) {
4304 case DVACT_ACTIVATE:
4305 rv = EOPNOTSUPP;
4306 break;
4307
4308 case DVACT_DEACTIVATE:
4309 if (sc->sc_sc1394.sc1394_if != NULL)
4310 rv = config_deactivate(sc->sc_sc1394.sc1394_if);
4311 break;
4312 }
4313 splx(s);
4314
4315 return (rv);
4316 }
4317
4318 #ifdef FW_DEBUG
4319 static void
4320 fwohci_show_intr(struct fwohci_softc *sc, u_int32_t intmask)
4321 {
4322
4323 printf("%s: intmask=0x%08x:", sc->sc_sc1394.sc1394_dev.dv_xname,
4324 intmask);
4325 if (intmask & OHCI_Int_CycleTooLong)
4326 printf(" CycleTooLong");
4327 if (intmask & OHCI_Int_UnrecoverableError)
4328 printf(" UnrecoverableError");
4329 if (intmask & OHCI_Int_CycleInconsistent)
4330 printf(" CycleInconsistent");
4331 if (intmask & OHCI_Int_BusReset)
4332 printf(" BusReset");
4333 if (intmask & OHCI_Int_SelfIDComplete)
4334 printf(" SelfIDComplete");
4335 if (intmask & OHCI_Int_LockRespErr)
4336 printf(" LockRespErr");
4337 if (intmask & OHCI_Int_PostedWriteErr)
4338 printf(" PostedWriteErr");
4339 if (intmask & OHCI_Int_ReqTxComplete)
4340 printf(" ReqTxComplete(0x%04x)",
4341 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST,
4342 OHCI_SUBREG_ContextControlClear));
4343 if (intmask & OHCI_Int_RespTxComplete)
4344 printf(" RespTxComplete(0x%04x)",
4345 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
4346 OHCI_SUBREG_ContextControlClear));
4347 if (intmask & OHCI_Int_ARRS)
4348 printf(" ARRS(0x%04x)",
4349 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
4350 OHCI_SUBREG_ContextControlClear));
4351 if (intmask & OHCI_Int_ARRQ)
4352 printf(" ARRQ(0x%04x)",
4353 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST,
4354 OHCI_SUBREG_ContextControlClear));
4355 if (intmask & OHCI_Int_IsochRx)
4356 printf(" IsochRx(0x%08x)",
4357 OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear));
4358 if (intmask & OHCI_Int_IsochTx)
4359 printf(" IsochTx(0x%08x)",
4360 OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear));
4361 if (intmask & OHCI_Int_RQPkt)
4362 printf(" RQPkt(0x%04x)",
4363 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST,
4364 OHCI_SUBREG_ContextControlClear));
4365 if (intmask & OHCI_Int_RSPkt)
4366 printf(" RSPkt(0x%04x)",
4367 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
4368 OHCI_SUBREG_ContextControlClear));
4369 printf("\n");
4370 }
4371
4372 static void
4373 fwohci_show_phypkt(struct fwohci_softc *sc, u_int32_t val)
4374 {
4375 u_int8_t key, phyid;
4376
4377 key = (val & 0xc0000000) >> 30;
4378 phyid = (val & 0x3f000000) >> 24;
4379 printf("%s: PHY packet from %d: ",
4380 sc->sc_sc1394.sc1394_dev.dv_xname, phyid);
4381 switch (key) {
4382 case 0:
4383 printf("PHY Config:");
4384 if (val & 0x00800000)
4385 printf(" ForceRoot");
4386 if (val & 0x00400000)
4387 printf(" Gap=%x", (val & 0x003f0000) >> 16);
4388 printf("\n");
4389 break;
4390 case 1:
4391 printf("Link-on\n");
4392 break;
4393 case 2:
4394 printf("SelfID:");
4395 if (val & 0x00800000) {
4396 printf(" #%d", (val & 0x00700000) >> 20);
4397 } else {
4398 if (val & 0x00400000)
4399 printf(" LinkActive");
4400 printf(" Gap=%x", (val & 0x003f0000) >> 16);
4401 printf(" Spd=S%d", 100 << ((val & 0x0000c000) >> 14));
4402 if (val & 0x00000800)
4403 printf(" Cont");
4404 if (val & 0x00000002)
4405 printf(" InitiateBusReset");
4406 }
4407 if (val & 0x00000001)
4408 printf(" +");
4409 printf("\n");
4410 break;
4411 default:
4412 printf("unknown: 0x%08x\n", val);
4413 break;
4414 }
4415 }
4416 #endif /* FW_DEBUG */
4417
4418 #if 0
4419 void fwohci_dumpreg(struct ieee1394_softc *, struct fwiso_regdump *);
4420
4421 void
4422 fwohci_dumpreg(struct ieee1394_softc *isc, struct fwiso_regdump *fr)
4423 {
4424 struct fwohci_softc *sc = (struct fwohci_softc *)isc;
4425 #if 0
4426 u_int32_t val;
4427
4428 printf("%s: dump reg\n", isc->sc1394_dev.dv_xname);
4429 printf("\tNodeID reg 0x%08x\n",
4430 OHCI_CSR_READ(sc, OHCI_REG_NodeId));
4431 val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4432 printf("\tIsoCounter 0x%08x, %d %d %d", val,
4433 (val >> 25) & 0xfe, (val >> 12) & 0x1fff, val & 0xfff);
4434 val = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet);
4435 printf(" IntMask 0x%08x, %s\n", val,
4436 val & OHCI_Int_IsochTx ? "isoTx" : "");
4437
4438 val = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet);
4439 printf("\tIT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
4440 OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr),
4441 val,
4442 val & OHCI_CTXCTL_RUN ? " run" : "",
4443 val & OHCI_CTXCTL_WAKE ? " wake" : "",
4444 val & OHCI_CTXCTL_DEAD ? " dead" : "",
4445 val & OHCI_CTXCTL_ACTIVE ? " active" : "");
4446 #endif
4447
4448 fr->fr_nodeid = OHCI_CSR_READ(sc, OHCI_REG_NodeId);
4449 fr->fr_isocounter = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4450 fr->fr_intmask = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet);
4451 fr->fr_it0_commandptr = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr);
4452 fr->fr_it0_contextctrl = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet);
4453
4454
4455 }
4456 #endif
4457
4458
4459 u_int16_t
4460 fwohci_cycletimer(struct fwohci_softc *sc)
4461 {
4462 u_int32_t reg;
4463
4464 reg = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4465
4466 return (reg >> 12)&0xffff;
4467 }
4468
4469
4470 u_int16_t
4471 fwohci_it_cycletimer(ieee1394_it_tag_t it)
4472 {
4473 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
4474
4475 return fwohci_cycletimer(itc->itc_sc);
4476 }
4477
4478
4479
4480
4481
4482 /*
4483 * return value: if positive value, number of DMA buffer segments. If
4484 * negative value, error happens. Never zero.
4485 */
4486 static int
4487 fwohci_misc_dmabuf_alloc(bus_dma_tag_t dmat, int dsize, int segno,
4488 bus_dma_segment_t *segp, bus_dmamap_t *dmapp, void **mapp,
4489 const char *xname)
4490 {
4491 int nsegs;
4492 int error;
4493
4494 printf("fwohci_misc_desc_alloc: dsize %d segno %d\n", dsize, segno);
4495
4496 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0,
4497 segp, segno, &nsegs, 0)) != 0) {
4498 printf("%s: unable to allocate descriptor buffer, error = %d\n",
4499 xname, error);
4500 goto fail_0;
4501 }
4502
4503 DPRINTF(("fwohci_misc_desc_alloc: %d segment[s]\n", nsegs));
4504
4505 if ((error = bus_dmamem_map(dmat, segp, nsegs, dsize, (caddr_t *)mapp,
4506 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
4507 printf("%s: unable to map descriptor buffer, error = %d\n",
4508 xname, error);
4509 goto fail_1;
4510 }
4511
4512 DPRINTF(("fwohci_misc_desc_alloc: %s map ok\n", xname));
4513
4514 #ifdef FWOHCI_DEBUG
4515 {
4516 int loop;
4517
4518 for (loop = 0; loop < nsegs; ++loop) {
4519 printf("\t%.2d: 0x%lx - 0x%lx\n", loop,
4520 (long)segp[loop].ds_addr,
4521 (long)segp[loop].ds_addr + segp[loop].ds_len - 1);
4522 }
4523 }
4524 #endif /* FWOHCI_DEBUG */
4525
4526 if ((error = bus_dmamap_create(dmat, dsize, nsegs, dsize,
4527 0, BUS_DMA_WAITOK, dmapp)) != 0) {
4528 printf("%s: unable to create descriptor buffer DMA map, "
4529 "error = %d\n", xname, error);
4530 goto fail_2;
4531 }
4532
4533 DPRINTF(("fwohci_misc_dmabuf_alloc: bus_dmamem_create success\n"));
4534
4535 if ((error = bus_dmamap_load(dmat, *dmapp, *mapp, dsize, NULL,
4536 BUS_DMA_WAITOK)) != 0) {
4537 printf("%s: unable to load descriptor buffer DMA map, "
4538 "error = %d\n", xname, error);
4539 goto fail_3;
4540 }
4541
4542 DPRINTF(("fwohci_it_desc_alloc: bus_dmamem_load success\n"));
4543
4544 return nsegs;
4545
4546 fail_3:
4547 bus_dmamap_destroy(dmat, *dmapp);
4548 fail_2:
4549 bus_dmamem_unmap(dmat, *mapp, dsize);
4550 fail_1:
4551 bus_dmamem_free(dmat, segp, nsegs);
4552 fail_0:
4553 return error;
4554 }
4555
4556
4557 static void
4558 fwohci_misc_dmabuf_free(bus_dma_tag_t dmat, int dsize, int nsegs,
4559 bus_dma_segment_t *segp, bus_dmamap_t *dmapp, caddr_t map)
4560 {
4561 bus_dmamap_destroy(dmat, *dmapp);
4562 bus_dmamem_unmap(dmat, map, dsize);
4563 bus_dmamem_free(dmat, segp, nsegs);
4564 }
4565
4566
4567
4568
4569 /*
4570 * Isochronous receive service
4571 */
4572
4573 /*
4574 * static struct fwohci_ir_ctx *
4575 * fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm,
4576 * int bufnum, int maxsize, int flags)
4577 */
4578 static struct fwohci_ir_ctx *
4579 fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm,
4580 int bufnum, int maxsize, int flags)
4581 {
4582 struct fwohci_ir_ctx *irc;
4583 int i;
4584
4585 printf("fwohci_ir_construct(%s, %d, %d, %x, %d, %d\n",
4586 sc->sc_sc1394.sc1394_dev.dv_xname, no, ch, tagbm, bufnum, maxsize);
4587
4588 if ((irc = malloc(sizeof(*irc), M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
4589 return NULL;
4590 }
4591
4592 irc->irc_sc = sc;
4593
4594 irc->irc_num = no;
4595 irc->irc_status = 0;
4596
4597 irc->irc_channel = ch;
4598 irc->irc_tagbm = tagbm;
4599
4600 irc->irc_desc_num = bufnum;
4601
4602 irc->irc_flags = flags;
4603
4604 /* add header */
4605 maxsize += 8;
4606 /* rounding up */
4607 for (i = 32; i < maxsize; i <<= 1);
4608 printf("fwohci_ir_ctx_construct: maxsize %d => %d\n",
4609 maxsize, i);
4610
4611 maxsize = i;
4612
4613 irc->irc_maxsize = maxsize;
4614 irc->irc_buf_totalsize = bufnum * maxsize;
4615
4616 if (fwohci_ir_buf_setup(irc)) {
4617 /* cannot alloc descriptor */
4618 return NULL;
4619 }
4620
4621 irc->irc_readtop = irc->irc_desc_map;
4622 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1;
4623 irc->irc_savedbranch = irc->irc_writeend->fd_branch;
4624 irc->irc_writeend->fd_branch = 0;
4625 /* sync */
4626
4627 if (fwohci_ir_stop(irc) || fwohci_ir_init(irc)) {
4628 return NULL;
4629 }
4630
4631 irc->irc_status |= IRC_STATUS_READY;
4632
4633 return irc;
4634 }
4635
4636
4637
4638 /*
4639 * static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4640 *
4641 * This function release all DMA buffers and itself.
4642 */
4643 static void
4644 fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4645 {
4646 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat, irc->irc_buf_totalsize,
4647 irc->irc_buf_nsegs, irc->irc_buf_segs,
4648 &irc->irc_buf_dmamap, (caddr_t)irc->irc_buf);
4649 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4650 irc->irc_desc_size,
4651 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4652 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map);
4653
4654 free(irc, M_DEVBUF);
4655 }
4656
4657
4658
4659
4660 /*
4661 * static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc)
4662 *
4663 * Allocates descriptors for context DMA dedicated for
4664 * isochronous receive.
4665 *
4666 * This function returns 0 (zero) if it succeeds. Otherwise,
4667 * return negative value.
4668 */
4669 static int
4670 fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc)
4671 {
4672 int nsegs;
4673 struct fwohci_desc *fd;
4674 u_int32_t branch;
4675 int bufno = 0; /* DMA segment */
4676 bus_size_t bufused = 0; /* offset in a DMA segment */
4677
4678 irc->irc_desc_size = irc->irc_desc_num * sizeof(struct fwohci_desc);
4679
4680 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat,
4681 irc->irc_desc_size, 1, &irc->irc_desc_seg, &irc->irc_desc_dmamap,
4682 (void **)&irc->irc_desc_map,
4683 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname);
4684
4685 if (nsegs < 0) {
4686 printf("fwohci_ir_buf_alloc: cannot get descriptor\n");
4687 return -1;
4688 }
4689 irc->irc_desc_nsegs = nsegs;
4690
4691 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat,
4692 irc->irc_buf_totalsize, 16, irc->irc_buf_segs,
4693 &irc->irc_buf_dmamap, (void **)&irc->irc_buf,
4694 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname);
4695
4696 if (nsegs < 0) {
4697 printf("fwohci_ir_buf_alloc: cannot get DMA buffer\n");
4698 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4699 irc->irc_desc_size,
4700 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4701 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map);
4702 return -1;
4703 }
4704 irc->irc_buf_nsegs = nsegs;
4705
4706 branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr
4707 + sizeof(struct fwohci_desc);
4708 bufno = 0;
4709 bufused = 0;
4710
4711 for (fd = irc->irc_desc_map;
4712 fd < irc->irc_desc_map + irc->irc_desc_num; ++fd) {
4713 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_LAST
4714 | OHCI_DESC_STATUS | OHCI_DESC_BRANCH;
4715 if (irc->irc_flags & IEEE1394_IR_SHORTDELAY) {
4716 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
4717 }
4718 #if 0
4719 if ((fd - irc->irc_desc_map) % 64 == 0) {
4720 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
4721 }
4722 #endif
4723 fd->fd_reqcount = irc->irc_maxsize;
4724 fd->fd_status = fd->fd_rescount = 0;
4725
4726 fd->fd_branch = branch | 0x01;
4727 branch += sizeof(struct fwohci_desc);
4728
4729 /* physical addr to data? */
4730 fd->fd_data =
4731 (u_int32_t)((irc->irc_buf_segs[bufno].ds_addr + bufused));
4732 bufused += irc->irc_maxsize;
4733 if (bufused > irc->irc_buf_segs[bufno].ds_len) {
4734 bufused = 0;
4735 if (++bufno == irc->irc_buf_nsegs) {
4736 /* fail */
4737 printf("fwohci_ir_buf_setup fail\n");
4738
4739 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4740 irc->irc_desc_size,
4741 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4742 &irc->irc_desc_dmamap,
4743 (caddr_t)irc->irc_desc_map);
4744 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4745 irc->irc_buf_totalsize,
4746 irc->irc_buf_nsegs, irc->irc_buf_segs,
4747 &irc->irc_buf_dmamap,
4748 (caddr_t)irc->irc_buf);
4749 return -1;
4750 }
4751 }
4752
4753 #ifdef FWOHCI_DEBUG
4754 if (fd < irc->irc_desc_map + 4
4755 || (fd > irc->irc_desc_map + irc->irc_desc_num - 4)) {
4756 printf("fwohci_ir_buf_setup: desc %d %p buf %08x"
4757 " size %d branch %08x\n",
4758 fd - irc->irc_desc_map, fd, fd->fd_data,
4759 fd->fd_reqcount, fd->fd_branch);
4760 }
4761 #endif /* FWOHCI_DEBUG */
4762 }
4763
4764 --fd;
4765 fd->fd_branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr | 1;
4766 DPRINTF(("fwohci_ir_buf_setup: desc %d %p buf %08x size %d branch %08x\n",
4767 (int)(fd - irc->irc_desc_map), fd, fd->fd_data, fd->fd_reqcount,
4768 fd->fd_branch));
4769
4770 return 0;
4771 }
4772
4773
4774
4775 /*
4776 * static void fwohci_ir_init(struct fwohci_ir_ctx *irc)
4777 *
4778 * This function initialise DMA engine.
4779 */
4780 static int
4781 fwohci_ir_init(struct fwohci_ir_ctx *irc)
4782 {
4783 struct fwohci_softc *sc = irc->irc_sc;
4784 int n = irc->irc_num;
4785 u_int32_t ctxmatch;
4786
4787 ctxmatch = irc->irc_channel & IEEE1394_ISO_CHANNEL_MASK;
4788
4789 if (irc->irc_channel & IEEE1394_ISO_CHANNEL_ANY) {
4790 OHCI_SYNC_RX_DMA_WRITE(sc, n,
4791 OHCI_SUBREG_ContextControlSet,
4792 OHCI_CTXCTL_RX_MULTI_CHAN_MODE);
4793
4794 /* Receive all the isochronous channels */
4795 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet, 0xffffffff);
4796 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet, 0xffffffff);
4797 ctxmatch = 0;
4798 }
4799
4800 ctxmatch |= ((irc->irc_tagbm & 0x0f) << OHCI_CTXMATCH_TAG_BITPOS);
4801 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch, ctxmatch);
4802
4803 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear,
4804 OHCI_CTXCTL_RX_BUFFER_FILL | OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE);
4805 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet,
4806 OHCI_CTXCTL_RX_ISOCH_HEADER);
4807
4808 printf("fwohci_ir_init\n");
4809
4810 return 0;
4811 }
4812
4813
4814 /*
4815 * static int fwohci_ir_start(struct fwohci_ir_ctx *irc)
4816 *
4817 * This function starts DMA engine. This function must call
4818 * after fwohci_ir_init() and active bit of context control
4819 * register negated. This function will not check it.
4820 */
4821 static int
4822 fwohci_ir_start(struct fwohci_ir_ctx *irc)
4823 {
4824 struct fwohci_softc *sc = irc->irc_sc;
4825 int startidx = irc->irc_readtop - irc->irc_desc_map;
4826 u_int32_t startaddr;
4827
4828 startaddr = irc->irc_desc_dmamap->dm_segs[0].ds_addr
4829 + sizeof(struct fwohci_desc)*startidx;
4830
4831 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num, OHCI_SUBREG_CommandPtr,
4832 startaddr | 1);
4833 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear,
4834 (1 << irc->irc_num));
4835 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num,
4836 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
4837
4838 printf("fwohci_ir_start: CmdPtr %08x Ctx %08x startidx %d\n",
4839 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_CommandPtr),
4840 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_ContextControlSet),
4841 startidx);
4842
4843 irc->irc_status &= ~IRC_STATUS_READY;
4844 irc->irc_status |= IRC_STATUS_RUN;
4845
4846 if ((irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) == 0) {
4847 irc->irc_status |= IRC_STATUS_RECEIVE;
4848 }
4849
4850 return 0;
4851 }
4852
4853
4854
4855 /*
4856 * static int fwohci_ir_stop(struct fwohci_ir_ctx *irc)
4857 *
4858 * This function stops DMA engine.
4859 */
4860 static int
4861 fwohci_ir_stop(struct fwohci_ir_ctx *irc)
4862 {
4863 struct fwohci_softc *sc = irc->irc_sc;
4864 int i;
4865
4866 printf("fwohci_ir_stop\n");
4867
4868 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num,
4869 OHCI_SUBREG_ContextControlClear,
4870 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD);
4871
4872 i = 0;
4873 while (OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4874 OHCI_SUBREG_ContextControlSet) & OHCI_CTXCTL_ACTIVE) {
4875 #if 0
4876 u_int32_t reg = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4877 OHCI_SUBREG_ContextControlClear);
4878
4879 printf("%s: %d intr IR_CommandPtr 0x%08x "
4880 "ContextCtrl 0x%08x%s%s%s%s\n",
4881 sc->sc_sc1394.sc1394_dev.dv_xname, i,
4882 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4883 OHCI_SUBREG_CommandPtr),
4884 reg,
4885 reg & OHCI_CTXCTL_RUN ? " run" : "",
4886 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
4887 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
4888 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
4889 #endif
4890 if (i > 20) {
4891 printf("fwohci_ir_stop: %s does not stop\n",
4892 sc->sc_sc1394.sc1394_dev.dv_xname);
4893 return 1;
4894 }
4895 DELAY(10);
4896 }
4897
4898 irc->irc_status &= ~IRC_STATUS_RUN;
4899
4900 return 0;
4901 }
4902
4903
4904
4905
4906
4907
4908 static void
4909 fwohci_ir_intr(struct fwohci_softc *sc, struct fwohci_ir_ctx *irc)
4910 {
4911 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
4912 u_int32_t cmd, ctx;
4913 int idx;
4914 struct fwohci_desc *fd;
4915
4916 sc->sc_isocnt.ev_count++;
4917
4918 if (!(irc->irc_status & IRC_STATUS_RUN)) {
4919 printf("fwohci_ir_intr: not running\n");
4920 return;
4921 }
4922
4923 bus_dmamap_sync(sc->sc_dmat, irc->irc_desc_dmamap,
4924 0, irc->irc_desc_size, BUS_DMASYNC_PREREAD);
4925
4926 ctx = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4927 OHCI_SUBREG_ContextControlSet);
4928
4929 cmd = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4930 OHCI_SUBREG_CommandPtr);
4931
4932 #define OHCI_CTXCTL_RUNNING (OHCI_CTXCTL_RUN|OHCI_CTXCTL_ACTIVE)
4933 #define OHCI_CTXCTL_RUNNING_MASK (OHCI_CTXCTL_RUNNING|OHCI_CTXCTL_DEAD)
4934
4935 idx = (cmd & 0xfffffff8) - (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr;
4936 idx /= sizeof(struct fwohci_desc);
4937
4938 if ((ctx & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUNNING) {
4939 if (irc->irc_waitchan != NULL) {
4940 DPRINTF(("fwohci_ir_intr: wakeup "
4941 "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n",
4942 irc->irc_num, cmd, ctx, idx));
4943 #ifdef FWOHCI_WAIT_DEBUG
4944 irc->irc_cycle[1] = fwohci_cycletimer(irc->irc_sc);
4945 #endif
4946 wakeup((void *)irc->irc_waitchan);
4947 }
4948 selwakeup(&irc->irc_sel);
4949 return;
4950 }
4951
4952 fd = irc->irc_desc_map + idx;
4953
4954 printf("fwohci_ir_intr: %s error "
4955 "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n", xname,
4956 irc->irc_num, cmd, ctx, idx);
4957 printf("\tfd flag %x branch %x stat %x rescnt %x total pkt %d\n",
4958 fd->fd_flags, fd->fd_branch, fd->fd_status,fd->fd_rescount,
4959 irc->irc_pktcount);
4960 }
4961
4962
4963
4964
4965 /*
4966 * static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc)
4967 *
4968 * This function obtains the lenth of descriptors with data.
4969 */
4970 static int
4971 fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc)
4972 {
4973 struct fwohci_desc *fd = irc->irc_readtop;
4974 int i = 0;
4975
4976 /* XXX SYNC */
4977 while (fd->fd_status != 0) {
4978 if (fd == irc->irc_readtop && i > 0) {
4979 printf("descriptor filled %d at %d\n", i,
4980 irc->irc_pktcount);
4981 #ifdef FWOHCI_WAIT_DEBUG
4982 irc->irc_cycle[2] = fwohci_cycletimer(irc->irc_sc);
4983 printf("cycletimer %d:%d %d:%d %d:%d\n",
4984 irc->irc_cycle[0]>>13, irc->irc_cycle[0]&0x1fff,
4985 irc->irc_cycle[1]>>13, irc->irc_cycle[1]&0x1fff,
4986 irc->irc_cycle[2]>>13, irc->irc_cycle[2]&0x1fff);
4987 #endif
4988
4989 break;
4990 }
4991
4992 ++i;
4993 ++fd;
4994 if (fd == irc->irc_desc_map + irc->irc_desc_num) {
4995 fd = irc->irc_desc_map;
4996 }
4997
4998 }
4999
5000 return i;
5001 }
5002
5003
5004
5005
5006 /*
5007 * int fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag,
5008 * struct uio *uio, int headoffs, int flags)
5009 *
5010 * This function reads data from fwohci's isochronous receive
5011 * buffer.
5012 */
5013 int
5014 fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag, struct uio *uio,
5015 int headoffs, int flags)
5016 {
5017 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5018 int packetnum;
5019 int copylen, hdrshim, fwisohdrsiz;
5020 struct fwohci_desc *fd, *fdprev = NULL; /* XXX fdprev use is suspect */
5021 u_int8_t *data;
5022 int status = 0;
5023 u_int32_t tmpbranch;
5024 int pktcount_prev = irc->irc_pktcount;
5025 #ifdef FW_DEBUG
5026 int totalread = 0;
5027 #endif
5028
5029 if (irc->irc_status & IRC_STATUS_READY) {
5030 printf("fwohci_ir_read: starting iso read engine\n");
5031 fwohci_ir_start(irc);
5032 }
5033
5034 packetnum = fwohci_ir_ctx_packetnum(irc);
5035
5036 DPRINTF(("fwohci_ir_read resid %lu DMA buf %d\n",
5037 (unsigned long)uio->uio_resid, packetnum));
5038
5039 if (packetnum == 0) {
5040 return EAGAIN;
5041 }
5042
5043 #ifdef USEDRAIN
5044 if (packetnum > irc->irc_desc_num - irc->irc_desc_num/4) {
5045 packetnum -= fwohci_ir_ctx_drain(irc);
5046 if (irc->irc_pktcount != 0) {
5047 printf("fwohci_ir_read overrun %d\n",
5048 irc->irc_pktcount);
5049 }
5050 }
5051 #endif /* USEDRAIN */
5052
5053 fd = irc->irc_readtop;
5054
5055 #if 0
5056 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0
5057 && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) {
5058 unsigned int s;
5059 int i = 0;
5060
5061 fdprev = fd;
5062 while (fd->fd_status != 0) {
5063 s = data[14] << 8;
5064 s |= data[15];
5065
5066 if (s != 0x0000ffffu) {
5067 DPRINTF(("find header %x at %d\n",
5068 s, irc->irc_pktcount));
5069 irc->irc_status |= IRC_STATUS_RECEIVE;
5070 break;
5071 }
5072
5073 fd->fd_rescount = 0;
5074 fd->fd_status = 0;
5075
5076 fdprev = fd;
5077 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5078 fd = irc->irc_desc_map;
5079 data = irc->irc_buf;
5080 }
5081 ++i;
5082 }
5083
5084 /* XXX SYNC */
5085 if (i > 0) {
5086 tmpbranch = fdprev->fd_branch;
5087 fdprev->fd_branch = 0;
5088 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5089 irc->irc_writeend = fdprev;
5090 irc->irc_savedbranch = tmpbranch;
5091 }
5092 /* XXX SYNC */
5093
5094 if (fd->fd_status == 0) {
5095 return EAGAIN;
5096 }
5097 }
5098 #endif
5099
5100 hdrshim = 8;
5101 fwisohdrsiz = 0;
5102 data = irc->irc_buf + (fd - irc->irc_desc_map) * irc->irc_maxsize;
5103 if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) {
5104 fwisohdrsiz = sizeof(struct fwiso_header);
5105 }
5106
5107 while (fd->fd_status != 0 &&
5108 (copylen = fd->fd_reqcount - fd->fd_rescount - hdrshim - headoffs)
5109 + fwisohdrsiz <= uio->uio_resid) {
5110
5111 DPRINTF(("pkt %04x:%04x uiomove %p, %d\n",
5112 fd->fd_status, fd->fd_rescount,
5113 (void *)(data + 8 + headoffs), copylen));
5114 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0) {
5115 DPRINTF(("[%d]", copylen));
5116 if (irc->irc_pktcount > 1000) {
5117 printf("no header found\n");
5118 status = EIO;
5119 break; /* XXX */
5120 }
5121 } else {
5122 DPRINTF(("<%d>", copylen));
5123 }
5124
5125 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0
5126 && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC
5127 && copylen > 0) {
5128 unsigned int s;
5129
5130 s = data[14] << 8;
5131 s |= data[15];
5132
5133 if (s != 0x0000ffffu) {
5134 DPRINTF(("find header %x at %d\n",
5135 s, irc->irc_pktcount));
5136 irc->irc_status |= IRC_STATUS_RECEIVE;
5137 }
5138 }
5139
5140 if (irc->irc_status & IRC_STATUS_RECEIVE) {
5141 if (copylen > 0) {
5142 if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) {
5143 struct fwiso_header fh;
5144
5145 fh.fh_timestamp = htonl((*(u_int32_t *)data) & 0xffff);
5146 fh.fh_speed = htonl((fd->fd_status >> 5)& 0x00000007);
5147 fh.fh_capture_size = htonl(copylen + 4);
5148 fh.fh_iso_header = htonl(*(u_int32_t *)(data + 4));
5149 status = uiomove((void *)&fh,
5150 sizeof(fh), uio);
5151 if (status != 0) {
5152 /* An error happens */
5153 printf("uio error in hdr\n");
5154 break;
5155 }
5156 }
5157 status = uiomove((void *)(data + 8 + headoffs),
5158 copylen, uio);
5159 if (status != 0) {
5160 /* An error happens */
5161 printf("uio error\n");
5162 break;
5163 }
5164 #ifdef FW_DEBUG
5165 totalread += copylen;
5166 #endif
5167 }
5168 }
5169
5170 fd->fd_rescount = 0;
5171 fd->fd_status = 0;
5172
5173 #if 0
5174 /* advance writeend pointer and fill branch */
5175
5176 tmpbranch = fd->fd_branch;
5177 fd->fd_branch = 0;
5178 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5179 irc->irc_writeend = fd;
5180 irc->irc_savedbranch = tmpbranch;
5181 #endif
5182 fdprev = fd;
5183
5184 data += irc->irc_maxsize;
5185 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5186 fd = irc->irc_desc_map;
5187 data = irc->irc_buf;
5188 }
5189 ++irc->irc_pktcount;
5190 }
5191
5192 #if 1
5193 if (irc->irc_pktcount != pktcount_prev) {
5194 /* XXX SYNC */
5195 tmpbranch = fdprev->fd_branch;
5196 fdprev->fd_branch = 0;
5197 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5198 irc->irc_writeend = fdprev;
5199 irc->irc_savedbranch = tmpbranch;
5200 /* XXX SYNC */
5201 }
5202 #endif
5203
5204 if (!(OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num,
5205 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)) {
5206 /* do wake */
5207 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5208 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
5209 }
5210
5211 if (packetnum > irc->irc_maxqueuelen) {
5212 irc->irc_maxqueuelen = packetnum;
5213 irc->irc_maxqueuepos = irc->irc_pktcount;
5214 }
5215
5216 if (irc->irc_pktcount == pktcount_prev) {
5217 #if 0
5218 printf("fwohci_ir_read: process 0 packet, total %d\n",
5219 irc->irc_pktcount);
5220 if (++pktfail > 30) {
5221 return 0;
5222 }
5223 #endif
5224 return EAGAIN;
5225 }
5226
5227 irc->irc_readtop = fd;
5228
5229 DPRINTF(("fwochi_ir_read: process %d packet, total %d\n",
5230 totalread, irc->irc_pktcount));
5231
5232 return status;
5233 }
5234
5235
5236
5237
5238 /*
5239 * int fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag,
5240 * void *wchan, char *name)
5241 *
5242 * This function waits till new data comes.
5243 */
5244 int
5245 fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag, void *wchan, char *name)
5246 {
5247 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5248 struct fwohci_desc *fd;
5249 int pktnum;
5250 int stat;
5251
5252 if ((pktnum = fwohci_ir_ctx_packetnum(irc)) > 4) {
5253 DPRINTF(("fwohci_ir_wait enough data %d\n", pktnum));
5254 return 0;
5255 }
5256
5257 fd = irc->irc_readtop + 32;
5258 if (fd >= irc->irc_desc_map + irc->irc_desc_num) {
5259 fd -= irc->irc_desc_num;
5260 }
5261
5262 irc->irc_waitchan = wchan;
5263 if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) {
5264 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
5265 DPRINTF(("fwohci_ir_wait stops %d set intr %d\n",
5266 (int)(irc->irc_readtop - irc->irc_desc_map),
5267 (int)(fd - irc->irc_desc_map)));
5268 /* XXX SYNC */
5269 }
5270
5271 #ifdef FWOHCI_WAIT_DEBUG
5272 irc->irc_cycle[0] = fwohci_cycletimer(irc->irc_sc);
5273 #endif
5274
5275 irc->irc_status |= IRC_STATUS_SLEEPING;
5276 if ((stat = tsleep(wchan, PCATCH|PRIBIO, name, hz*10)) != 0) {
5277 irc->irc_waitchan = NULL;
5278 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
5279 if (stat == EWOULDBLOCK) {
5280 printf("fwohci_ir_wait: timeout\n");
5281 return EIO;
5282 } else {
5283 return EINTR;
5284 }
5285 }
5286
5287 irc->irc_waitchan = NULL;
5288 if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) {
5289 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
5290 /* XXX SYNC */
5291 }
5292
5293 DPRINTF(("fwohci_ir_wait: wakeup\n"));
5294
5295 return 0;
5296 }
5297
5298
5299
5300
5301 /*
5302 * int fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag,
5303 * struct proc *p)
5304 *
5305 * This function returns the number of packets in queue.
5306 */
5307 int
5308 fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag, struct proc *p)
5309 {
5310 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5311 int pktnum;
5312
5313 if (irc->irc_status & IRC_STATUS_READY) {
5314 printf("fwohci_ir_select: starting iso read engine\n");
5315 fwohci_ir_start(irc);
5316 }
5317
5318 if ((pktnum = fwohci_ir_ctx_packetnum(irc)) == 0) {
5319 selrecord(p, &irc->irc_sel);
5320 }
5321
5322 return pktnum;
5323 }
5324
5325
5326
5327 #ifdef USEDRAIN
5328 /*
5329 * int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc)
5330 *
5331 * This function will drain all the packets in receive DMA
5332 * buffer.
5333 */
5334 static int
5335 fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc)
5336 {
5337 struct fwohci_desc *fd = irc->irc_readtop;
5338 u_int32_t reg;
5339 int count = 0;
5340
5341 reg = OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num,
5342 OHCI_SUBREG_ContextControlClear);
5343
5344 printf("fwohci_ir_ctx_drain ctx%s%s%s%s\n",
5345 reg & OHCI_CTXCTL_RUN ? " run" : "",
5346 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5347 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5348 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5349
5350 if ((reg & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUN) {
5351 /* DMA engine is stopped */
5352 u_int32_t startadr;
5353
5354 for (fd = irc->irc_desc_map;
5355 fd < irc->irc_desc_map + irc->irc_desc_num;
5356 ++fd) {
5357 fd->fd_status = 0;
5358 }
5359
5360 /* Restore branch addr of the last descriptor */
5361 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5362
5363 irc->irc_readtop = irc->irc_desc_map;
5364 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1;
5365 irc->irc_savedbranch = irc->irc_writeend->fd_branch;
5366 irc->irc_writeend->fd_branch = 0;
5367
5368 count = irc->irc_desc_num;
5369
5370 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5371 OHCI_SUBREG_ContextControlClear,
5372 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD);
5373
5374 startadr = (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr;
5375
5376 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count);
5377
5378 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5379 OHCI_SUBREG_CommandPtr, startadr | 1);
5380
5381 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5382 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
5383 } else {
5384 const int removecount = irc->irc_desc_num/2;
5385 u_int32_t tmpbranch;
5386
5387 for (count = 0; count < removecount; ++count) {
5388 if (fd->fd_status == 0) {
5389 break;
5390 }
5391
5392 fd->fd_status = 0;
5393
5394 tmpbranch = fd->fd_branch;
5395 fd->fd_branch = 0;
5396 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5397 irc->irc_writeend = fd;
5398 irc->irc_savedbranch = tmpbranch;
5399
5400 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5401 fd = irc->irc_desc_map;
5402 }
5403 ++count;
5404 }
5405
5406 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count);
5407 }
5408
5409 return count;
5410 }
5411 #endif /* USEDRAIN */
5412
5413
5414
5415
5416
5417
5418
5419
5420
5421 /*
5422 * service routines for isochronous transmit
5423 */
5424
5425
5426 struct fwohci_it_ctx *
5427 fwohci_it_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tag, int maxsize)
5428 {
5429 struct fwohci_it_ctx *itc;
5430 size_t dmastrsize;
5431 struct fwohci_it_dmabuf *dmastr;
5432 struct fwohci_desc *desc;
5433 bus_addr_t descphys;
5434 int nodesc;
5435 int i, j;
5436
5437 if ((itc = malloc(sizeof(*itc), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) {
5438 return itc;
5439 }
5440
5441 itc->itc_num = no;
5442 itc->itc_flags = 0;
5443 itc->itc_sc = sc;
5444 itc->itc_bufnum = FWOHCI_IT_BUFNUM;
5445
5446 itc->itc_channel = ch;
5447 itc->itc_tag = tag;
5448 itc->itc_speed = OHCI_CTXCTL_SPD_100; /* XXX */
5449
5450 itc->itc_outpkt = 0;
5451
5452 itc->itc_maxsize = maxsize;
5453
5454 dmastrsize = sizeof(struct fwohci_it_dmabuf)*itc->itc_bufnum;
5455
5456 if ((dmastr = malloc(dmastrsize, M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) {
5457 goto error_1;
5458 }
5459 itc->itc_buf = dmastr;
5460
5461 /*
5462 * Get memory for descriptors. One buffer will have 256
5463 * packet entry and 1 trailing descriptor for writing scratch.
5464 * 4-byte space for scratch.
5465 */
5466 itc->itc_descsize = (256*3 + 1)*itc->itc_bufnum;
5467
5468 if (fwohci_it_desc_alloc(itc)) {
5469 printf("%s: cannot get enough memory for descriptor\n",
5470 sc->sc_sc1394.sc1394_dev.dv_xname);
5471 goto error_2;
5472 }
5473
5474 /* prepare DMA buffer */
5475 nodesc = itc->itc_descsize/itc->itc_bufnum;
5476 desc = (struct fwohci_desc *)itc->itc_descmap;
5477 descphys = itc->itc_dseg.ds_addr;
5478
5479 for (i = 0; i < itc->itc_bufnum; ++i) {
5480
5481 if (fwohci_itd_construct(itc, &dmastr[i], i, desc,
5482 descphys, nodesc,
5483 itc->itc_maxsize, itc->itc_scratch_paddr)) {
5484 goto error_3;
5485 }
5486 desc += nodesc;
5487 descphys += sizeof(struct fwohci_desc)*nodesc;
5488 }
5489
5490 #if 1
5491 itc->itc_buf_start = itc->itc_buf;
5492 itc->itc_buf_end = itc->itc_buf;
5493 itc->itc_buf_linkend = itc->itc_buf;
5494 #else
5495 itc->itc_bufidx_start = 0;
5496 itc->itc_bufidx_end = 0;
5497 itc->itc_bufidx_linkend = 0;
5498 #endif
5499 itc->itc_buf_cnt = 0;
5500 itc->itc_waitchan = NULL;
5501 *itc->itc_scratch = 0xffffffff;
5502
5503 return itc;
5504
5505 error_3:
5506 for (j = 0; j < i; ++j) {
5507 fwohci_itd_destruct(&dmastr[j]);
5508 }
5509 fwohci_it_desc_free(itc);
5510 error_2:
5511 free(itc->itc_buf, M_DEVBUF);
5512 error_1:
5513 free(itc, M_DEVBUF);
5514
5515 return NULL;
5516 }
5517
5518
5519
5520 void
5521 fwohci_it_ctx_destruct(struct fwohci_it_ctx *itc)
5522 {
5523 int i;
5524
5525 for (i = 0; i < itc->itc_bufnum; ++i) {
5526 fwohci_itd_destruct(&itc->itc_buf[i]);
5527 }
5528
5529 fwohci_it_desc_free(itc);
5530 free(itc, M_DEVBUF);
5531 }
5532
5533
5534 /*
5535 * static int fwohci_it_desc_alloc(struct fwohci_it_ctx *itc)
5536 *
5537 * Allocates descriptors for context DMA dedicated for
5538 * isochronous transmit.
5539 *
5540 * This function returns 0 (zero) if it succeeds. Otherwise,
5541 * return negative value.
5542 */
5543 static int
5544 fwohci_it_desc_alloc(struct fwohci_it_ctx *itc)
5545 {
5546 bus_dma_tag_t dmat = itc->itc_sc->sc_dmat;
5547 const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
5548 int error, dsize;
5549
5550 /* add for scratch */
5551 itc->itc_descsize++;
5552
5553 /* rounding up to 256 */
5554 if ((itc->itc_descsize & 0x0ff) != 0) {
5555 itc->itc_descsize =
5556 (itc->itc_descsize & ~0x0ff) + 0x100;
5557 }
5558 /* remove for scratch */
5559
5560 itc->itc_descsize--;
5561 printf("%s: fwohci_it_desc_alloc will allocate %d descs\n",
5562 xname, itc->itc_descsize);
5563
5564 /*
5565 * allocate descriptor buffer
5566 */
5567 dsize = sizeof(struct fwohci_desc) * itc->itc_descsize;
5568
5569 printf("%s: fwohci_it_desc_alloc: descriptor %d, dsize %d\n",
5570 xname, itc->itc_descsize, dsize);
5571
5572 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0,
5573 &itc->itc_dseg, 1, &itc->itc_dnsegs, 0)) != 0) {
5574 printf("%s: unable to allocate descriptor buffer, error = %d\n",
5575 xname, error);
5576 goto fail_0;
5577 }
5578
5579 printf("fwohci_it_desc_alloc: %d segment[s]\n", itc->itc_dnsegs);
5580
5581 if ((error = bus_dmamem_map(dmat, &itc->itc_dseg,
5582 itc->itc_dnsegs, dsize, (caddr_t *)&itc->itc_descmap,
5583 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
5584 printf("%s: unable to map descriptor buffer, error = %d\n",
5585 xname, error);
5586 goto fail_1;
5587 }
5588
5589 printf("fwohci_it_desc_alloc: bus_dmamem_map success dseg %lx:%lx\n",
5590 (long)itc->itc_dseg.ds_addr, (long)itc->itc_dseg.ds_len);
5591
5592 if ((error = bus_dmamap_create(dmat, dsize, itc->itc_dnsegs,
5593 dsize, 0, BUS_DMA_WAITOK, &itc->itc_ddmamap)) != 0) {
5594 printf("%s: unable to create descriptor buffer DMA map, "
5595 "error = %d\n", xname, error);
5596 goto fail_2;
5597 }
5598
5599 printf("fwohci_it_desc_alloc: bus_dmamem_create success\n");
5600
5601 {
5602 int loop;
5603
5604 for (loop = 0; loop < itc->itc_ddmamap->dm_nsegs; ++loop) {
5605 printf("\t%.2d: 0x%lx - 0x%lx\n", loop,
5606 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr,
5607 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr +
5608 (long)itc->itc_ddmamap->dm_segs[loop].ds_len - 1);
5609 }
5610 }
5611
5612 if ((error = bus_dmamap_load(dmat, itc->itc_ddmamap,
5613 itc->itc_descmap, dsize, NULL, BUS_DMA_WAITOK)) != 0) {
5614 printf("%s: unable to load descriptor buffer DMA map, "
5615 "error = %d\n", xname, error);
5616 goto fail_3;
5617 }
5618
5619 printf("%s: fwohci_it_desc_alloc: get DMA memory phys:0x%08x vm:%p\n",
5620 xname, (int)itc->itc_ddmamap->dm_segs[0].ds_addr, itc->itc_descmap);
5621
5622 itc->itc_scratch = (u_int32_t *)(itc->itc_descmap
5623 + (sizeof(struct fwohci_desc))*itc->itc_descsize);
5624 itc->itc_scratch_paddr =
5625 itc->itc_ddmamap->dm_segs[0].ds_addr
5626 + (sizeof(struct fwohci_desc))*itc->itc_descsize;
5627
5628 printf("%s: scratch %p, 0x%x\n", xname, itc->itc_scratch,
5629 (int)itc->itc_scratch_paddr);
5630
5631 /* itc->itc_scratch_paddr = vtophys(itc->itc_scratch); */
5632
5633 return 0;
5634
5635 fail_3:
5636 bus_dmamap_destroy(dmat, itc->itc_ddmamap);
5637 fail_2:
5638 bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize);
5639 fail_1:
5640 bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs);
5641 fail_0:
5642 itc->itc_dnsegs = 0;
5643 itc->itc_descmap = NULL;
5644 return error;
5645 }
5646
5647
5648 static void
5649 fwohci_it_desc_free(struct fwohci_it_ctx *itc)
5650 {
5651 bus_dma_tag_t dmat = itc->itc_sc->sc_dmat;
5652 int dsize = sizeof(struct fwohci_desc) * itc->itc_descsize + 4;
5653
5654 bus_dmamap_destroy(dmat, itc->itc_ddmamap);
5655 bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize);
5656 bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs);
5657
5658 itc->itc_dnsegs = 0;
5659 itc->itc_descmap = NULL;
5660 }
5661
5662
5663
5664 /*
5665 * int fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
5666 * struct ieee1394_it_datalist *itdata, int flags)
5667 *
5668 * This function will write packet data to DMA buffer in the
5669 * context. This function will parse ieee1394_it_datalist
5670 * command and fill DMA buffer. This function will return the
5671 * number of written packets, or error code if the return value
5672 * is negative.
5673 *
5674 * When this funtion returns positive value but smaller than
5675 * ndata, it reaches at the ent of DMA buffer.
5676 */
5677 int
5678 fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
5679 struct ieee1394_it_datalist *itdata, int flags)
5680 {
5681 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
5682 int rv;
5683 int writepkt = 0;
5684 struct fwohci_it_dmabuf *itd;
5685 int i = 0;
5686
5687 itd = itc->itc_buf_end;
5688
5689 while (ndata > 0) {
5690 int s;
5691
5692 if (fwohci_itd_isfull(itd) || fwohci_itd_islocked(itd)) {
5693 if (itc->itc_buf_cnt == itc->itc_bufnum) {
5694 /* no space to write */
5695 printf("sleeping: start linkend end %d %d %d "
5696 "bufcnt %d\n",
5697 itc->itc_buf_start->itd_num,
5698 itc->itc_buf_linkend->itd_num,
5699 itc->itc_buf_end->itd_num,
5700 itc->itc_buf_cnt);
5701
5702 itc->itc_waitchan = itc;
5703 if (tsleep((void *)itc->itc_waitchan,
5704 PCATCH, "fwohci it", 0) == EWOULDBLOCK) {
5705 itc->itc_waitchan = NULL;
5706 printf("fwohci0 signal\n");
5707 break;
5708 }
5709 printf("waking: start linkend end %d %d %d\n",
5710 itc->itc_buf_start->itd_num,
5711 itc->itc_buf_linkend->itd_num,
5712 itc->itc_buf_end->itd_num);
5713
5714 itc->itc_waitchan = itc;
5715 i = 0;
5716 } else {
5717 /*
5718 * Use next buffer. This DMA buffer is full
5719 * or locked.
5720 */
5721 INC_BUF(itc, itd);
5722 }
5723 }
5724
5725 if (++i > 10) {
5726 panic("why loop so much %d", itc->itc_buf_cnt);
5727 break;
5728 }
5729
5730 s = splbio();
5731
5732 if (fwohci_itd_hasdata(itd) == 0) {
5733 ++itc->itc_buf_cnt;
5734 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
5735 }
5736
5737 rv = fwohci_itd_writedata(itd, ndata, itdata);
5738 DPRINTF(("fwohci_it_ctx_writedata: buf %d ndata %d rv %d\n",
5739 itd->itd_num, ndata, rv));
5740
5741 if (itc->itc_buf_start == itc->itc_buf_linkend
5742 && (itc->itc_flags & ITC_FLAGS_RUN) != 0) {
5743
5744 #ifdef DEBUG_USERADD
5745 printf("fwohci_it_ctx_writedata: emergency!\n");
5746 #endif
5747 if (itc->itc_buf_linkend != itc->itc_buf_end
5748 && fwohci_itd_hasdata(itc->itc_buf_end)) {
5749 struct fwohci_it_dmabuf *itdn = itc->itc_buf_linkend;
5750
5751 INC_BUF(itc, itdn);
5752 printf("connecting %d after %d\n",
5753 itdn->itd_num,
5754 itc->itc_buf_linkend->itd_num);
5755 if (fwohci_itd_link(itc->itc_buf_linkend, itdn)) {
5756 printf("fwohci_it_ctx_writedata:"
5757 " cannot link correctly\n");
5758 splx(s);
5759 return -1;
5760 }
5761 itc->itc_buf_linkend = itdn;
5762 }
5763 }
5764
5765 splx(s);
5766
5767 if (rv < 0) {
5768 /* some errors happend */
5769 break;
5770 }
5771
5772 writepkt += rv;
5773 ndata -= rv;
5774 itdata += rv;
5775 itc->itc_buf_end = itd;
5776 }
5777
5778 /* Start DMA engine if stopped */
5779 if ((itc->itc_flags & ITC_FLAGS_RUN) == 0) {
5780 if (itc->itc_buf_cnt > itc->itc_bufnum - 1 || flags) {
5781 /* run */
5782 printf("fwohci_itc_ctl_writedata: DMA engine start\n");
5783 fwohci_it_ctx_run(itc);
5784 }
5785 }
5786
5787 return writepkt;
5788 }
5789
5790
5791
5792 static void
5793 fwohci_it_ctx_run(struct fwohci_it_ctx *itc)
5794 {
5795 struct fwohci_softc *sc = itc->itc_sc;
5796 int ctx = itc->itc_num;
5797 struct fwohci_it_dmabuf *itd
5798 = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
5799 u_int32_t reg;
5800 int i;
5801
5802 if (itc->itc_flags & ITC_FLAGS_RUN) {
5803 return;
5804 }
5805 itc->itc_flags |= ITC_FLAGS_RUN;
5806
5807 /*
5808 * dirty, but I can't imagine better place to save branch addr
5809 * of top DMA buffer and substitute 0 to it.
5810 */
5811 itd->itd_savedbranch = itd->itd_lastdesc->fd_branch;
5812 itd->itd_lastdesc->fd_branch = 0;
5813
5814 if (itc->itc_buf_cnt > 1) {
5815 struct fwohci_it_dmabuf *itdn = itd;
5816
5817 #if 0
5818 INC_BUF(itc, itdn);
5819
5820 if (fwohci_itd_link(itd, itdn)) {
5821 printf("fwohci_it_ctx_run: cannot link correctly\n");
5822 return;
5823 }
5824 itc->itc_buf_linkend = itdn;
5825 #else
5826 for (;;) {
5827 INC_BUF(itc, itdn);
5828
5829 if (itdn == itc->itc_buf_end) {
5830 break;
5831 }
5832 if (fwohci_itd_link(itd, itdn)) {
5833 printf("fwohci_it_ctx_run: cannot link\n");
5834 return;
5835 }
5836 itd = itdn;
5837 }
5838 itc->itc_buf_linkend = itd;
5839 #endif
5840 } else {
5841 itd->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
5842 itc->itc_buf_linkend = itc->itc_buf_end;
5843 itc->itc_buf_end->itd_flags |= ITD_FLAGS_LOCK;
5844
5845 /* sanity check */
5846 if (itc->itc_buf_end != itc->itc_buf_start) {
5847 printf("buf start & end differs %p %p\n",
5848 itc->itc_buf_end, itc->itc_buf_start);
5849 }
5850 #if 0
5851 {
5852 u_int32_t *fdp;
5853 u_int32_t adr;
5854 int i;
5855
5856 printf("fwohci_it_ctx_run: itc_buf_cnt 1, DMA buf %d\n",
5857 itd->itd_num);
5858 printf(" last desc %p npacket %d, %d 0x%04x%04x",
5859 itd->itd_lastdesc, itd->itd_npacket,
5860 (itd->itd_lastdesc - itd->itd_desc)/3,
5861 itd->itd_lastdesc->fd_flags,
5862 itd->itd_lastdesc->fd_reqcount);
5863 fdp = (u_int32_t *)itd->itd_desc;
5864 adr = (u_int32_t)itd->itd_desc_phys; /* XXX */
5865
5866 for (i = 0; i < 7*4; ++i) {
5867 if (i % 4 == 0) {
5868 printf("\n%x:", adr + 4*i);
5869 }
5870 printf(" %08x", fdp[i]);
5871 }
5872
5873 if (itd->itd_npacket > 4) {
5874 printf("\n...");
5875 i = (itd->itd_npacket - 2)*12 + 4;
5876 } else {
5877 i = 2*12 + 4;
5878 }
5879 for (;i < itd->itd_npacket*12 + 4; ++i) {
5880 if (i % 4 == 0) {
5881 printf("\n%x:", adr + 4*i);
5882 }
5883 printf(" %08x", fdp[i]);
5884 }
5885 printf("\n");
5886 }
5887 #endif
5888 }
5889 {
5890 struct fwohci_desc *fd;
5891
5892 printf("fwohci_it_ctx_run: link start linkend end %d %d %d\n",
5893 itc->itc_buf_start->itd_num,
5894 itc->itc_buf_linkend->itd_num,
5895 itc->itc_buf_end->itd_num);
5896
5897 fd = itc->itc_buf_start->itd_desc;
5898 if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
5899 printf("fwohci_it_ctx_run: start buf not with STORE\n");
5900 }
5901 fd += 3;
5902 if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
5903 printf("fwohci_it_ctx_run: start buf does not have intr\n");
5904 }
5905
5906 fd = itc->itc_buf_linkend->itd_desc;
5907 if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
5908 printf("fwohci_it_ctx_run: linkend buf not with STORE\n");
5909 }
5910 fd += 3;
5911 if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
5912 printf("fwohci_it_ctx_run: linkend buf does not have intr\n");
5913 }
5914 }
5915
5916 *itc->itc_scratch = 0xffffffff;
5917
5918 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear,
5919 0xffff0000);
5920 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
5921
5922 printf("fwohci_it_ctx_run start for ctx %d\n", ctx);
5923 printf("%s: bfr IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
5924 sc->sc_sc1394.sc1394_dev.dv_xname,
5925 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
5926 reg,
5927 reg & OHCI_CTXCTL_RUN ? " run" : "",
5928 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5929 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5930 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5931
5932 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear,
5933 OHCI_CTXCTL_RUN);
5934
5935 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
5936 i = 0;
5937 while (reg & (OHCI_CTXCTL_ACTIVE | OHCI_CTXCTL_RUN)) {
5938 delay(100);
5939 if (++i > 1000) {
5940 printf("%s: cannot stop iso transmit engine\n",
5941 sc->sc_sc1394.sc1394_dev.dv_xname);
5942 break;
5943 }
5944 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx,
5945 OHCI_SUBREG_ContextControlSet);
5946 }
5947
5948 printf("%s: itm IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
5949 sc->sc_sc1394.sc1394_dev.dv_xname,
5950 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
5951 reg,
5952 reg & OHCI_CTXCTL_RUN ? " run" : "",
5953 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5954 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5955 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5956
5957 printf("%s: writing CommandPtr to 0x%08x\n",
5958 sc->sc_sc1394.sc1394_dev.dv_xname,
5959 (int)itc->itc_buf_start->itd_desc_phys);
5960 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_CommandPtr,
5961 fwohci_itd_list_head(itc->itc_buf_start) | 4);
5962
5963 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlSet,
5964 OHCI_CTXCTL_RUN | OHCI_CTXCTL_WAKE);
5965
5966 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
5967
5968 printf("%s: aft IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
5969 sc->sc_sc1394.sc1394_dev.dv_xname,
5970 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
5971 reg,
5972 reg & OHCI_CTXCTL_RUN ? " run" : "",
5973 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5974 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5975 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5976 }
5977
5978
5979
5980 int
5981 fwohci_it_ctx_flush(ieee1394_it_tag_t it)
5982 {
5983 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
5984 int rv = 0;
5985
5986 if ((itc->itc_flags & ITC_FLAGS_RUN) == 0
5987 && itc->itc_buf_cnt > 0) {
5988 printf("fwohci_it_ctx_flush: %s flushing\n",
5989 itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname);
5990
5991 fwohci_it_ctx_run(itc);
5992 rv = 1;
5993 }
5994
5995 return rv;
5996 }
5997
5998
5999 /*
6000 * static void fwohci_it_intr(struct fwohci_softc *sc,
6001 * struct fwochi_it_ctx *itc)
6002 *
6003 * This function is the interrupt handler for isochronous
6004 * transmit interrupt. This function will 1) unlink used
6005 * (already transmitted) buffers, 2) link new filled buffers, if
6006 * necessary and 3) say some free DMA buffers exist to
6007 * fwiso_write()
6008 */
6009 static void
6010 fwohci_it_intr(struct fwohci_softc *sc, struct fwohci_it_ctx *itc)
6011 {
6012 struct fwohci_it_dmabuf *itd, *newstartbuf;
6013 u_int16_t scratchval;
6014 u_int32_t reg;
6015
6016 reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
6017 OHCI_SUBREG_ContextControlSet);
6018
6019 /* print out debug info */
6020 #ifdef FW_DEBUG
6021 printf("fwohci_it_intr: CTX %d\n", itc->itc_num);
6022
6023 printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x "
6024 "ContextCtrl 0x%08x%s%s%s%s\n",
6025 sc->sc_sc1394.sc1394_dev.dv_xname,
6026 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr),
6027 reg,
6028 reg & OHCI_CTXCTL_RUN ? " run" : "",
6029 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
6030 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
6031 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
6032 printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n",
6033 sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch,
6034 itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num,
6035 itc->itc_buf_cnt);
6036 {
6037 u_int32_t reg
6038 = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
6039 printf("\t\tIsoCounter 0x%08x, %d %d %d\n", reg,
6040 (reg >> 25) & 0xfe, (reg >> 12) & 0x1fff, reg & 0xfff);
6041 }
6042 #endif /* FW_DEBUG */
6043 /* end print out debug info */
6044
6045 scratchval = (*itc->itc_scratch) & 0x0000ffff;
6046 *itc->itc_scratch = 0xffffffff;
6047
6048 if ((reg & OHCI_CTXCTL_ACTIVE) == 0 && scratchval != 0xffff) {
6049 /* DMA engine has been stopped */
6050 printf("DMA engine stopped\n");
6051 printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x "
6052 "ContextCtrl 0x%08x%s%s%s%s\n",
6053 sc->sc_sc1394.sc1394_dev.dv_xname,
6054 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr),
6055 reg,
6056 reg & OHCI_CTXCTL_RUN ? " run" : "",
6057 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
6058 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
6059 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
6060 printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n",
6061 sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch,
6062 itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num,
6063 itc->itc_buf_cnt);
6064 {
6065 u_int32_t reg
6066 = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
6067 printf("\t\tIsoCounter 0x%08x, %d %d %d\n", reg,
6068 (reg >> 25) & 0xfe, (reg >> 12) & 0x1fff, reg & 0xfff);
6069 }
6070 printf("\t\tbranch of lastdesc 0x%08x\n",
6071 itc->itc_buf_start->itd_lastdesc->fd_branch);
6072
6073 scratchval = 0xffff;
6074 itc->itc_flags &= ~ITC_FLAGS_RUN;
6075 }
6076
6077 /* unlink old buffers */
6078 if (scratchval != 0xffff) {
6079 /* normal path */
6080 newstartbuf = &itc->itc_buf[scratchval];
6081 } else {
6082 /* DMA engine stopped */
6083 newstartbuf = itc->itc_buf_linkend;
6084 INC_BUF(itc, newstartbuf);
6085 }
6086
6087 itd = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
6088 itc->itc_buf_start = newstartbuf;
6089 while (itd != newstartbuf) {
6090 itc->itc_outpkt += itd->itd_npacket;
6091 fwohci_itd_unlink(itd);
6092 INC_BUF(itc, itd);
6093 --itc->itc_buf_cnt;
6094 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
6095 }
6096
6097 #ifdef DEBUG_USERADD
6098 if (scratchval != 0xffff) {
6099 printf("fwohci0: intr start %d dataend %d %d\n", scratchval,
6100 itc->itc_buf_end->itd_num, itc->itc_outpkt);
6101 }
6102 #endif
6103
6104 if (scratchval == 0xffff) {
6105 /* no data supplied */
6106 printf("fwohci_it_intr: no it data. output total %d\n",
6107 itc->itc_outpkt);
6108
6109 if (itc->itc_buf_cnt > 0) {
6110 printf("fwohci_it_intr: it DMA stops "
6111 "w/ valid databuf %d buf %d data %d"
6112 " intr reg 0x%08x\n",
6113 itc->itc_buf_cnt,
6114 itc->itc_buf_end->itd_num,
6115 fwohci_itd_hasdata(itc->itc_buf_end),
6116 OHCI_CSR_READ(sc, OHCI_REG_IntEventSet));
6117 } else {
6118 /* All the data gone */
6119 itc->itc_buf_start
6120 = itc->itc_buf_end
6121 = itc->itc_buf_linkend
6122 = &itc->itc_buf[0];
6123 printf("fwohci_it_intr: all packets gone\n");
6124 }
6125
6126 itc->itc_flags &= ~ITC_FLAGS_RUN;
6127
6128 OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
6129 OHCI_SUBREG_ContextControlClear, 0xffffffff);
6130 OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
6131 OHCI_SUBREG_CommandPtr, 0);
6132 OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
6133 OHCI_SUBREG_ContextControlClear, 0x1f);
6134
6135 /* send message */
6136 if (itc->itc_waitchan != NULL) {
6137 wakeup((void *)itc->itc_waitchan);
6138 }
6139
6140 return;
6141 }
6142
6143 #if 0
6144 /* unlink old buffers */
6145 newstartbuf = &itc->itc_buf[scratchval];
6146
6147 itd = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
6148 itc->itc_buf_start = newstartbuf;
6149 while (itd != newstartbuf) {
6150 itc->itc_outpkt += itd->itd_npacket;
6151 fwohci_itd_unlink(itd);
6152 INC_BUF(itc, itd);
6153 --itc->itc_buf_cnt;
6154 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
6155 }
6156 #endif
6157
6158 /* sanity check */
6159 {
6160 int startidx, endidx, linkendidx;
6161
6162 startidx = itc->itc_buf_start->itd_num;
6163 endidx = itc->itc_buf_end->itd_num;
6164 linkendidx = itc->itc_buf_linkend->itd_num;
6165
6166 if (startidx < endidx) {
6167 if (linkendidx < startidx
6168 || endidx < linkendidx) {
6169 printf("funny, linkend is not between start "
6170 "and end [%d, %d]: %d\n",
6171 startidx, endidx, linkendidx);
6172 }
6173 } else if (startidx > endidx) {
6174 if (linkendidx < startidx
6175 && endidx < linkendidx) {
6176 printf("funny, linkend is not between start "
6177 "and end [%d, %d]: %d\n",
6178 startidx, endidx, linkendidx);
6179 }
6180 } else {
6181 if (linkendidx != startidx) {
6182 printf("funny, linkend is not between start "
6183 "and end [%d, %d]: %d\n",
6184 startidx, endidx, linkendidx);
6185 }
6186
6187 }
6188 }
6189
6190 /* link if some valid DMA buffers exist */
6191 if (itc->itc_buf_cnt > 1
6192 && itc->itc_buf_linkend != itc->itc_buf_end) {
6193 struct fwohci_it_dmabuf *itdprev;
6194 int i;
6195
6196 DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
6197 itc->itc_num,
6198 itc->itc_buf_start->itd_num,
6199 itc->itc_buf_linkend->itd_num,
6200 itc->itc_buf_end->itd_num,
6201 itc->itc_buf_cnt));
6202
6203 itd = itdprev = itc->itc_buf_linkend;
6204 INC_BUF(itc, itd);
6205
6206 #if 0
6207 if (fwohci_itd_isfilled(itd) || itc->itc_buf_cnt == 2) {
6208 while (itdprev != itc->itc_buf_end) {
6209
6210 if (fwohci_itd_link(itdprev, itd)) {
6211 break;
6212 }
6213
6214 itdprev = itd;
6215 INC_BUF(itc, itd);
6216 }
6217 itc->itc_buf_linkend = itdprev;
6218 }
6219 #endif
6220 i = 0;
6221 while (itdprev != itc->itc_buf_end) {
6222 if (!fwohci_itd_isfilled(itd) && itc->itc_buf_cnt > 2) {
6223 break;
6224 }
6225
6226 if (fwohci_itd_link(itdprev, itd)) {
6227 break;
6228 }
6229
6230 itdprev = itd;
6231 INC_BUF(itc, itd);
6232
6233 itc->itc_buf_linkend = itdprev;
6234 ++i;
6235 }
6236
6237 if (i > 0) {
6238 DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
6239 itc->itc_num,
6240 itc->itc_buf_start->itd_num,
6241 itc->itc_buf_linkend->itd_num,
6242 itc->itc_buf_end->itd_num,
6243 itc->itc_buf_cnt));
6244 }
6245 } else {
6246 struct fwohci_it_dmabuf *le;
6247
6248 le = itc->itc_buf_linkend;
6249
6250 printf("CTX %d: start linkend dataend bufs %d, %d, %d, %d no buffer added\n",
6251 itc->itc_num,
6252 itc->itc_buf_start->itd_num,
6253 itc->itc_buf_linkend->itd_num,
6254 itc->itc_buf_end->itd_num,
6255 itc->itc_buf_cnt);
6256 printf("\tlast descriptor %s %04x %08x\n",
6257 le->itd_lastdesc->fd_flags & OHCI_DESC_INTR_ALWAYS ? "intr" : "",
6258 le->itd_lastdesc->fd_flags,
6259 le->itd_lastdesc->fd_branch);
6260 }
6261
6262 /* send message */
6263 if (itc->itc_waitchan != NULL) {
6264 /* */
6265 wakeup((void *)itc->itc_waitchan);
6266 }
6267 }
6268
6269
6270
6271 /*
6272 * int fwohci_itd_construct(struct fwohci_it_ctx *itc,
6273 * struct fwohci_it_dmabuf *itd, int num,
6274 * struct fwohci_desc *desc, bus_addr_t phys,
6275 * int descsize, int maxsize, paddr_t scratch)
6276 *
6277 *
6278 *
6279 */
6280 int
6281 fwohci_itd_construct(struct fwohci_it_ctx *itc, struct fwohci_it_dmabuf *itd,
6282 int num, struct fwohci_desc *desc, bus_addr_t phys, int descsize,
6283 int maxsize, paddr_t scratch)
6284 {
6285 const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6286 struct fwohci_desc *fd;
6287 struct fwohci_desc *descend;
6288 int npkt;
6289 int bufno = 0; /* DMA segment */
6290 bus_size_t bufused = 0; /* offset in a DMA segment */
6291 int roundsize;
6292 int tag = itc->itc_tag;
6293 int ch = itc->itc_channel;
6294
6295 itd->itd_ctx = itc;
6296 itd->itd_num = num;
6297
6298 if (descsize > 1024*3) {
6299 printf("%s: fwohci_itd_construct[%d] descsize %d too big\n",
6300 xname, num, descsize);
6301 return -1;
6302 }
6303
6304 itd->itd_desc = desc;
6305 itd->itd_descsize = descsize;
6306 itd->itd_desc_phys = phys;
6307
6308 itd->itd_lastdesc = desc;
6309 itd->itd_npacket = 0;
6310
6311 printf("%s: fwohci_itd_construct[%d] desc %p descsize %d, maxsize %d\n",
6312 xname, itd->itd_num, itd->itd_desc, itd->itd_descsize, maxsize);
6313
6314 if (descsize < 4) {
6315 /* too small descriptor array. at least 4 */
6316 return -1;
6317 }
6318
6319 /* count up how many packet can handle */
6320 itd->itd_maxpacket = (descsize - 1)/3;
6321
6322 /* rounding up to power of 2. minimum 16 */
6323 roundsize = 16;
6324 for (roundsize = 16; roundsize < maxsize; roundsize <<= 1);
6325 itd->itd_maxsize = roundsize;
6326
6327 printf("\t\tdesc%d [%x, %lx]\n", itd->itd_num,
6328 (u_int32_t)phys,
6329 (unsigned long)((u_int32_t)phys
6330 + (itd->itd_maxpacket*3 + 1)*sizeof(struct fwohci_desc)));
6331 printf("%s: fwohci_itd_construct[%d] npkt %d maxsize round up to %d\n",
6332 xname, itd->itd_num, itd->itd_maxpacket, itd->itd_maxsize);
6333
6334 /* obtain DMA buffer */
6335 if (fwohci_itd_dmabuf_alloc(itd)) {
6336 /* cannot allocate memory for DMA buffer */
6337 return -1;
6338 }
6339
6340 /*
6341 * make descriptor chain
6342 *
6343 * First descriptor group has a STORE_VALUE, OUTPUT_IMMEDIATE
6344 * and OUTPUT_LAST descriptors Second and after that, a
6345 * descriptor group has an OUTPUT_IMMEDIATE and an OUTPUT_LAST
6346 * descriptor.
6347 */
6348 descend = desc + descsize;
6349
6350 /* set store value descriptor for 1st descriptor group */
6351 desc->fd_flags = OHCI_DESC_STORE_VALUE;
6352 desc->fd_reqcount = num; /* write number of DMA buffer class */
6353 desc->fd_data = scratch; /* at physical memory 'scratch' */
6354 desc->fd_branch = 0;
6355 desc->fd_status = desc->fd_rescount = 0;
6356
6357 itd->itd_store = desc;
6358 itd->itd_store_phys = phys;
6359
6360 ++desc;
6361 phys += 16;
6362
6363 npkt = 0;
6364 /* make OUTPUT_DESC chain for packets */
6365 for (fd = desc; fd + 2 < descend; fd += 3, ++npkt) {
6366 struct fwohci_desc *fi = fd;
6367 struct fwohci_desc *fl = fd + 2;
6368 u_int32_t *fi_data = (u_int32_t *)(fd + 1);
6369
6370 #if 0
6371 if (npkt > itd->itd_maxpacket - 3) {
6372 printf("%s: %3d fi fl %p %p\n", xname, npkt, fi,fl);
6373 }
6374 #endif
6375
6376 fi->fd_reqcount = 8; /* data size for OHCI command */
6377 fi->fd_flags = OHCI_DESC_IMMED;
6378 fi->fd_data = 0;
6379 fi->fd_branch = 0; /* branch for error */
6380 fi->fd_status = fi->fd_rescount = 0;
6381
6382 /* channel and tag is unchanged */
6383 *fi_data = OHCI_ITHEADER_VAL(TAG, tag) |
6384 OHCI_ITHEADER_VAL(CHAN, ch) |
6385 OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
6386 *++fi_data = 0;
6387 *++fi_data = 0;
6388 *++fi_data = 0;
6389
6390 fl->fd_flags = OHCI_DESC_OUTPUT | OHCI_DESC_LAST |
6391 OHCI_DESC_BRANCH;
6392 fl->fd_branch =
6393 (phys + sizeof(struct fwohci_desc)*(npkt + 1)*3) | 0x03;
6394 fl->fd_status = fl->fd_rescount = 0;
6395
6396 #ifdef FW_DEBUG
6397 if (npkt > itd->itd_maxpacket - 3) {
6398 DPRINTF(("%s: %3d fi fl fl branch %p %p 0x%x\n",
6399 xname, npkt, fi, fl, (int)fl->fd_branch));
6400 }
6401 #endif
6402
6403 /* physical addr to data? */
6404 fl->fd_data =
6405 (u_int32_t)((itd->itd_seg[bufno].ds_addr + bufused));
6406 bufused += itd->itd_maxsize;
6407 if (bufused > itd->itd_seg[bufno].ds_len) {
6408 bufused = 0;
6409 if (++bufno == itd->itd_nsegs) {
6410 /* fail */
6411 break;
6412 }
6413 }
6414 }
6415
6416 #if 0
6417 if (itd->itd_num == 0) {
6418 u_int32_t *fdp;
6419 u_int32_t adr;
6420 int i = 0;
6421
6422 fdp = (u_int32_t *)itd->itd_desc;
6423 adr = (u_int32_t)itd->itd_desc_phys; /* XXX */
6424
6425 printf("fwohci_itd_construct: audit DMA desc chain. %d\n",
6426 itd->itd_maxpacket);
6427 for (i = 0; i < itd->itd_maxpacket*12 + 4; ++i) {
6428 if (i % 4 == 0) {
6429 printf("\n%x:", adr + 4*i);
6430 }
6431 printf(" %08x", fdp[i]);
6432 }
6433 printf("\n");
6434
6435 }
6436 #endif
6437 /* last branch should be 0 */
6438 --fd;
6439 fd->fd_branch = 0;
6440
6441 printf("%s: pkt %d %d maxdesc %p\n",
6442 xname, npkt, itd->itd_maxpacket, descend);
6443
6444 return 0;
6445 }
6446
6447 void
6448 fwohci_itd_destruct(struct fwohci_it_dmabuf *itd)
6449 {
6450 const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6451
6452 printf("%s: fwohci_itd_destruct %d\n", xname, itd->itd_num);
6453
6454 fwohci_itd_dmabuf_free(itd);
6455 }
6456
6457
6458 /*
6459 * static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
6460 *
6461 * This function allocates DMA memory for fwohci_it_dmabuf. This
6462 * function will return 0 when it succeeds and return non-zero
6463 * value when it fails.
6464 */
6465 static int
6466 fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
6467 {
6468 const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6469 bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;
6470
6471 int dmasize = itd->itd_maxsize * itd->itd_maxpacket;
6472 int error;
6473
6474 DPRINTF(("%s: fwohci_itd_dmabuf_alloc[%d] dmasize %d maxpkt %d\n",
6475 xname, itd->itd_num, dmasize, itd->itd_maxpacket));
6476
6477 if ((error = bus_dmamem_alloc(dmat, dmasize, PAGE_SIZE, 0,
6478 itd->itd_seg, FWOHCI_MAX_ITDATASEG, &itd->itd_nsegs, 0)) != 0) {
6479 printf("%s: unable to allocate data buffer, error = %d\n",
6480 xname, error);
6481 goto fail_0;
6482 }
6483
6484 /* checking memory range */
6485 #ifdef FW_DEBUG
6486 {
6487 int loop;
6488
6489 for (loop = 0; loop < itd->itd_nsegs; ++loop) {
6490 DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
6491 (long)itd->itd_seg[loop].ds_addr,
6492 (long)itd->itd_seg[loop].ds_addr
6493 + (long)itd->itd_seg[loop].ds_len - 1));
6494 }
6495 }
6496 #endif
6497
6498 if ((error = bus_dmamem_map(dmat, itd->itd_seg, itd->itd_nsegs,
6499 dmasize, (caddr_t *)&itd->itd_buf,
6500 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
6501 printf("%s: unable to map data buffer, error = %d\n",
6502 xname, error);
6503 goto fail_1;
6504 }
6505
6506 DPRINTF(("fwohci_it_data_alloc[%d]: bus_dmamem_map addr %p\n",
6507 itd->itd_num, itd->itd_buf));
6508
6509 if ((error = bus_dmamap_create(dmat, /*chunklen*/dmasize,
6510 itd->itd_nsegs, dmasize, 0, BUS_DMA_WAITOK,
6511 &itd->itd_dmamap)) != 0) {
6512 printf("%s: unable to create data buffer DMA map, "
6513 "error = %d\n", xname, error);
6514 goto fail_2;
6515 }
6516
6517 DPRINTF(("fwohci_it_data_alloc: bus_dmamem_create\n"));
6518
6519 if ((error = bus_dmamap_load(dmat, itd->itd_dmamap,
6520 itd->itd_buf, dmasize, NULL, BUS_DMA_WAITOK)) != 0) {
6521 printf("%s: unable to load data buffer DMA map, error = %d\n",
6522 xname, error);
6523 goto fail_3;
6524 }
6525
6526 DPRINTF(("fwohci_itd_dmabuf_alloc: load DMA memory vm %p\n",
6527 itd->itd_buf));
6528 DPRINTF(("\tmapsize %ld nsegs %d\n",
6529 (long)itd->itd_dmamap->dm_mapsize, itd->itd_dmamap->dm_nsegs));
6530
6531 #ifdef FW_DEBUG
6532 {
6533 int loop;
6534
6535 for (loop = 0; loop < itd->itd_dmamap->dm_nsegs; ++loop) {
6536 DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
6537 (long)itd->itd_dmamap->dm_segs[loop].ds_addr,
6538 (long)itd->itd_dmamap->dm_segs[loop].ds_addr +
6539 (long)itd->itd_dmamap->dm_segs[loop].ds_len - 1));
6540 }
6541 }
6542 #endif
6543
6544 return 0;
6545
6546 fail_3:
6547 bus_dmamap_destroy(dmat, itd->itd_dmamap);
6548 fail_2:
6549 bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
6550 fail_1:
6551 bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);
6552 fail_0:
6553 itd->itd_nsegs = 0;
6554 itd->itd_maxpacket = 0;
6555 return error;
6556 }
6557
6558 /*
6559 * static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
6560 *
6561 * This function will release memory resource allocated by
6562 * fwohci_itd_dmabuf_alloc().
6563 */
6564 static void
6565 fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
6566 {
6567 bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;
6568 int dmasize = itd->itd_maxsize * itd->itd_maxpacket;
6569
6570 bus_dmamap_destroy(dmat, itd->itd_dmamap);
6571 bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
6572 bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);
6573
6574 itd->itd_nsegs = 0;
6575 itd->itd_maxpacket = 0;
6576 }
6577
6578
6579
6580 /*
6581 * int fwohci_itd_link(struct fwohci_it_dmabuf *itd,
6582 * struct fwohci_it_dmabuf *itdc)
6583 *
6584 * This function will concatinate two descriptor chains in dmabuf
6585 * itd and itdc. The descriptor link in itdc follows one in itd.
6586 * This function will move interrrupt packet from the end of itd
6587 * to the top of itdc.
6588 *
6589 * This function will return 0 whel this funcion suceeds. If an
6590 * error happens, return a negative value.
6591 */
6592 int
6593 fwohci_itd_link(struct fwohci_it_dmabuf *itd, struct fwohci_it_dmabuf *itdc)
6594 {
6595 struct fwohci_desc *fd1, *fdc;
6596
6597 if (itdc->itd_lastdesc == itdc->itd_desc) {
6598 /* no valid data */
6599 printf("fwohci_itd_link: no data\n");
6600 return -1;
6601 }
6602
6603 if (itdc->itd_flags & ITD_FLAGS_LOCK) {
6604 /* used already */
6605 printf("fwohci_itd_link: link locked\n");
6606 return -1;
6607 }
6608 itdc->itd_flags |= ITD_FLAGS_LOCK;
6609 /* for the first one */
6610 itd->itd_flags |= ITD_FLAGS_LOCK;
6611
6612 DPRINTF(("linking %d after %d: add %d pkts\n",
6613 itdc->itd_num, itd->itd_num, itdc->itd_npacket));
6614
6615 /* XXX: should sync cache */
6616
6617 fd1 = itd->itd_lastdesc;
6618 fdc = itdc->itd_desc + 3; /* OUTPUT_LAST in the first descriptor */
6619
6620 /* sanity check */
6621 #define OUTPUT_LAST_DESC (OHCI_DESC_OUTPUT | OHCI_DESC_LAST | OHCI_DESC_BRANCH)
6622 if ((fd1->fd_flags & OUTPUT_LAST_DESC) != OUTPUT_LAST_DESC) {
6623 printf("funny! not OUTPUT_LAST descriptor %p\n", fd1);
6624 }
6625 if (itd->itd_lastdesc - itd->itd_desc != 3 * itd->itd_npacket) {
6626 printf("funny! packet number inconsistency %ld <=> %ld\n",
6627 (long)(itd->itd_lastdesc - itd->itd_desc),
6628 (long)(3*itd->itd_npacket));
6629 }
6630
6631 fd1->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6632 fdc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
6633 fd1->fd_branch = itdc->itd_desc_phys | 4;
6634
6635 itdc->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
6636 /* save branch addr of lastdesc and substitute 0 to it */
6637 itdc->itd_savedbranch = itdc->itd_lastdesc->fd_branch;
6638 itdc->itd_lastdesc->fd_branch = 0;
6639
6640 DPRINTF(("%s: link (%d %d), add pkt %d/%d branch 0x%x next saved 0x%x\n",
6641 itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
6642 itd->itd_num, itdc->itd_num,
6643 itdc->itd_npacket, itdc->itd_maxpacket,
6644 (int)fd1->fd_branch, (int)itdc->itd_savedbranch));
6645
6646 /* XXX: should sync cache */
6647
6648 return 0;
6649 }
6650
6651
6652 /*
6653 * int fwohci_itd_unlink(struct fwohci_it_dmabuf *itd)
6654 *
6655 * This function will unlink the descriptor chain from valid link
6656 * of descriptors. The target descriptor is specified by the
6657 * arguent.
6658 */
6659 int
6660 fwohci_itd_unlink(struct fwohci_it_dmabuf *itd)
6661 {
6662 struct fwohci_desc *fd;
6663
6664 /* XXX: should sync cache */
6665
6666 fd = itd->itd_lastdesc;
6667
6668 fd->fd_branch = itd->itd_savedbranch;
6669 DPRINTF(("%s: unlink buf %d branch restored 0x%x\n",
6670 itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
6671 itd->itd_num, (int)fd->fd_branch));
6672
6673 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6674 itd->itd_lastdesc = itd->itd_desc;
6675
6676 fd = itd->itd_desc + 3; /* 1st OUTPUT_LAST */
6677 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6678
6679 /* XXX: should sync cache */
6680
6681 itd->itd_npacket = 0;
6682 itd->itd_lastdesc = itd->itd_desc;
6683 itd->itd_flags &= ~ITD_FLAGS_LOCK;
6684
6685 return 0;
6686 }
6687
6688
6689 /*
6690 * static int fwohci_itd_writedata(struct fwohci_it_dmabuf *, int ndata,
6691 * struct ieee1394_it_datalist *);
6692 *
6693 * This function will return the number of written data, or
6694 * negative value if an error happens
6695 */
6696 int
6697 fwohci_itd_writedata(struct fwohci_it_dmabuf *itd, int ndata,
6698 struct ieee1394_it_datalist *itdata)
6699 {
6700 int writepkt;
6701 int i;
6702 u_int8_t *p;
6703 struct fwohci_desc *fd;
6704 u_int32_t *fd_idata;
6705 const int dspace =
6706 itd->itd_maxpacket - itd->itd_npacket < ndata ?
6707 itd->itd_maxpacket - itd->itd_npacket : ndata;
6708
6709 if (itd->itd_flags & ITD_FLAGS_LOCK || dspace == 0) {
6710 /* it is locked: cannot write anything */
6711 if (itd->itd_flags & ITD_FLAGS_LOCK) {
6712 DPRINTF(("fwohci_itd_writedata: buf %d lock flag %s,"
6713 " dspace %d\n",
6714 itd->itd_num,
6715 itd->itd_flags & ITD_FLAGS_LOCK ? "ON" : "OFF",
6716 dspace));
6717 return 0; /* not an error */
6718 }
6719 }
6720
6721 /* sanity check */
6722 if (itd->itd_maxpacket < itd->itd_npacket) {
6723 printf("fwohci_itd_writedata: funny! # pkt > maxpkt"
6724 "%d %d\n", itd->itd_npacket, itd->itd_maxpacket);
6725 }
6726
6727 p = itd->itd_buf + itd->itd_maxsize * itd->itd_npacket;
6728 fd = itd->itd_lastdesc;
6729
6730 DPRINTF(("fwohci_itd_writedata(%d[%p], %d, 0x%p) invoked:\n",
6731 itd->itd_num, itd, ndata, itdata));
6732
6733 for (writepkt = 0; writepkt < dspace; ++writepkt) {
6734 u_int8_t *p1 = p;
6735 int cpysize;
6736 int totalsize = 0;
6737
6738 DPRINTF(("writing %d ", writepkt));
6739
6740 for (i = 0; i < 4; ++i) {
6741 switch (itdata->it_cmd[i]&IEEE1394_IT_CMD_MASK) {
6742 case IEEE1394_IT_CMD_IMMED:
6743 memcpy(p1, &itdata->it_u[i].id_data, 8);
6744 p1 += 8;
6745 totalsize += 8;
6746 break;
6747 case IEEE1394_IT_CMD_PTR:
6748 cpysize = itdata->it_cmd[i]&IEEE1394_IT_CMD_SIZE;
6749 DPRINTF(("fwohci_itd_writedata: cpy %d %p\n",
6750 cpysize, itdata->it_u[i].id_addr));
6751 if (totalsize + cpysize > itd->itd_maxsize) {
6752 /* error: too big size */
6753 break;
6754 }
6755 memcpy(p1, itdata->it_u[i].id_addr, cpysize);
6756 totalsize += cpysize;
6757 break;
6758 case IEEE1394_IT_CMD_NOP:
6759 break;
6760 default:
6761 /* unknown command */
6762 break;
6763 }
6764 }
6765
6766 /* only for DV test */
6767 if (totalsize != 488) {
6768 printf("error: totalsize %d at %d\n",
6769 totalsize, writepkt);
6770 }
6771
6772 DPRINTF(("totalsize %d ", totalsize));
6773
6774 /* fill iso command in OUTPUT_IMMED descriptor */
6775
6776 /* XXX: sync cache */
6777 fd += 2; /* next to first descriptor */
6778 fd_idata = (u_int32_t *)fd;
6779
6780 /*
6781 * Umm, should tag, channel and tcode be written
6782 * previously in itd_construct?
6783 */
6784 #if 0
6785 *fd_idata = OHCI_ITHEADER_VAL(TAG, tag) |
6786 OHCI_ITHEADER_VAL(CHAN, ch) |
6787 OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
6788 #endif
6789 *++fd_idata = totalsize << 16;
6790
6791 /* fill data in OUTPUT_LAST descriptor */
6792 ++fd;
6793 /* intr check... */
6794 if (fd->fd_flags & OHCI_DESC_INTR_ALWAYS) {
6795 printf("uncleared INTR flag in desc %ld\n",
6796 (long)(fd - itd->itd_desc - 1)/3);
6797 }
6798 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6799
6800 if ((fd - itd->itd_desc - 1)/3 != itd->itd_maxpacket - 1) {
6801 u_int32_t bcal;
6802
6803 bcal = (fd - itd->itd_desc + 1)*sizeof(struct fwohci_desc) + (u_int32_t)itd->itd_desc_phys;
6804 if (bcal != (fd->fd_branch & 0xfffffff0)) {
6805
6806 printf("uum, branch differ at %d, %x %x %ld/%d\n",
6807 itd->itd_num,
6808 bcal,
6809 fd->fd_branch,
6810 (long)((fd - itd->itd_desc - 1)/3),
6811 itd->itd_maxpacket);
6812 }
6813 } else {
6814 /* the last pcaket */
6815 if (fd->fd_branch != 0) {
6816 printf("uum, branch differ at %d, %x %x %ld/%d\n",
6817 itd->itd_num,
6818 0,
6819 fd->fd_branch,
6820 (long)((fd - itd->itd_desc - 1)/3),
6821 itd->itd_maxpacket);
6822 }
6823 }
6824
6825 /* sanity check */
6826 if (fd->fd_flags != OUTPUT_LAST_DESC) {
6827 printf("fwohci_itd_writedata: dmabuf %d desc inconsistent %d\n",
6828 itd->itd_num, writepkt + itd->itd_npacket);
6829 break;
6830 }
6831 fd->fd_reqcount = totalsize;
6832 /* XXX: sync cache */
6833
6834 ++itdata;
6835 p += itd->itd_maxsize;
6836 }
6837
6838 DPRINTF(("loop start %d, %d times %d\n",
6839 itd->itd_npacket, dspace, writepkt));
6840
6841 itd->itd_npacket += writepkt;
6842 itd->itd_lastdesc = fd;
6843
6844 return writepkt;
6845 }
6846
6847
6848
6849
6850
6851 int
6852 fwohci_itd_isfilled(struct fwohci_it_dmabuf *itd)
6853 {
6854
6855 return itd->itd_npacket*2 > itd->itd_maxpacket ? 1 : 0;
6856 }
6857